id
stringlengths
10
10
title
stringlengths
3
179
track
stringclasses
1 value
status
stringclasses
3 values
keywords
stringlengths
2
2.39k
primary_area
stringclasses
21 values
author
stringclasses
501 values
authorids
stringclasses
501 values
aff
stringclasses
1 value
aff_domain
stringclasses
1 value
position
stringclasses
1 value
rating
stringclasses
355 values
confidence
stringlengths
0
19
soundness
stringclasses
642 values
contribution
stringclasses
596 values
presentation
stringclasses
782 values
rating_avg
float64
0
9
confidence_avg
float64
0
5
soundness_avg
float64
0
4
contribution_avg
float64
0
4
presentation_avg
float64
0
4
corr_rating_confidence
float64
-1
1
project
stringclasses
1 value
github
stringclasses
1 value
Review
listlengths
2
10
00SnKBGTsz
DataEnvGym: Data Generation Agents in Teacher Environments with Student Feedback
main
Active
iterative data generation;llm agent;lifelong learning
foundation or frontier models, including LLMs
5;6;6;8
4;3;4;4
2;2;4;3
3;3;4;3
3;3;2;2
6.25
3.75
2.75
3.25
2.5
0.132453
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Is it possible to implement a random-policy baseline where you randomly chose a set of (naturally collected) datapoints from a data pool? The no-state baseline has flavor of this baseline but LLM-informed decisions could be biased. \n- Is it possible to compare this approach with active learning, in which instead of doing data generation, you do data *selection* and ask models to generate only synthetic labels, but not synthetic inputs?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- Tackle a timely and interesting problem. \n- Provide the necessary infrastructure for the community to study the problem, opening up opportunities for future contributions. \n- Consider various data generation strategies,\n- Well-desgined experiments which demonstrate the effectiveness of the proposed approaches and conduct insightful analyses." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Gym environments for data synthesis, framing the problem as sequential decision-making. In these environments, actions correspond to data-generation plans, and states represent the performance summary of a student model. 
The paper implements environments for three tasks: visual question answering (VQA), math, and code generation. Each environment offers three state representations: open-ended, skill-list, and skill-tree. Additionally, it proposes an LLM-based policy for data generation. Experimental results demonstrate that the LLM can make strategically effective choices based on environment-state information." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The paper is currently dense and difficult to follow. The introduction includes excessive implementation details, which detract from providing a simple, high-level intuition. Using a specific task example to guide readers through the core concepts would make the paper more accessible.\n\n* The paper focuses solely on the data generation plan rather than a full, end-to-end data generation process. It relies on a fixed, off-the-shelf data-generation engine that cannot be modified. The authors should admit this limitation and discuss potential strategies for overcoming it.\n\n* The quality of the data-generation engine can impact both student performance and the data-generation plan itself. Current approaches do not take into account the data-generation engine capabilities in the design of the policy or the evaluation of the student. For instance, poor student performance might result from the engine producing low-quality data on a specific skill, which could prompt the policy to avoid querying the engine for that skill.\n\n* The learning procedure can be resource-intensive. The authors should report the time, cost, and computing resources used for the experiments." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In the Experiments section, the authors mention that the baseline student model should not have been heavily post-trained so that there are rooms for further improvements. However, it would be beneficial to provide additional evidence and details to support the claim that the student's performance is improved due to the added data points rather than insufficient training. For instance, the training protocol involved a fixed 10-epoch training period; it remains unclear whether the model had reached convergence within this timeframe or if the introduction of new data points accelerated convergence. Further clarification on this aspect would enhance the overall validity of the results.\n\nAlso the result would be more sound if more quantitative and qualitative results for skill discovery is reported in this paper." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper presents a novel and insightful perspective on the autonomous data generation problem, leveraging principles from reinforcement learning to conceptualize it as a sequential decision-making process. 
The authors provide a thorough explanation of this approach, the motivations behind and the underlying mechanics.\n\nThis paper proposed a modular framework/testbed that can be easily adapted to various tasks, showcasing its versatility and potential for widespread applicability. The authors demonstrate the effectiveness of their approach through experiments on 3 tasks of multiple modalities, including text, image, and code generation, yielding promising early results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a modular system for automated data generation, designed to minimize the need for human annotations. The proposed approach employs a reinforcement learning-inspired methodology, decomposing the process into a sequence of action predictions (data generation policy) based on state information (feedback from model errors) in an iterative manner. The effectiveness of this approach is demonstrated through three diverse tasks, encompassing text, image, and code generation across different modalities." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The experiment part should be conducted more thoroughly: specifically, creating a test set that incorporates newly generated data points from the data generation agent and reporting evaluation results for each retrained model over successive iterations would provide more comprehensive insights into the system's performance." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- How does the performance of the data generation agents change over longer iterations? The paper truncates experiments when performance increases, but it would be insightful to explore whether performance plateaus or continuously increase over extended training.\n- Is the total training data allocation fixed in each environment, or does it vary dynamically? The methodology mentions rebalancing but lacks clarity on how these allocations adjust adaptively based on feedback." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Good contribution to automated data generation for model improvement.\n- Clearly written with structured sections explaining each environment type and experimental results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents DataEnvGym, a framework designed to simulate environments for data generation agents. These agents iteratively generate synthetic data to address weaknesses in student models, aiming to improve model performance across tasks like mathematics, programming, and visual question answering. DataEnvGym provides various structured environments (Open-Ended, Skill-List, and Skill-Tree) where data generation agents create targeted training examples based on feedback from the student model, offering a dynamic approach to automated model improvement." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper should clarify early on that the focus is on synthetic data generation for training purposes, as this underpins the motivation for the approach.\n- Important related works on algorithms using feedback from training to generate the next training environments are missing [1, 2, 3, 4].\n- Lines 460 - 465, I believe there is a typo whereby it says that “each experiment is truncated once the performance consistently decreases for multiple iterations”. Should it be “increases”?\n- Repeated runs of experiments without confidence intervals will be valuable, especially since the variance of performance seems to be very high.\n\n[1] Sudhakaran, S., González-Duque, M., Freiberger, M., Glanois, C., Najarro, E., & Risi, S. (2024). Mariogpt: Open-ended text2level generation through large language models. Advances in Neural Information Processing Systems, 36.\n[2] Todd, G., Earle, S., Nasir, M. U., Green, M. C., & Togelius, J. (2023, April). Level generation through large language models. In Proceedings of the 18th International Conference on the Foundations of Digital Games (pp. 1-8).\n[3] Zhang, J., Lehman, J., Stanley, K., & Clune, J. (2023). Omni: Open-endedness via models of human notions of interestingness. arXiv preprint arXiv:2306.01711.\n[4] Faldor, M., Zhang, J., Cully, A., & Clune, J. (2024). OMNI-EPIC: Open-endedness via Models of human Notions of Interestingness with Environments Programmed in Code. arXiv preprint arXiv:2405.15568." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "Yes, Discrimination / bias / fairness concerns", "Yes, Potentially harmful insights, methodologies and applications" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Limited Evaluation of Agent Architectures: The paper primarily focuses on introducing the DataEnvGym environment, but the evaluation of data generation agents is limited to relatively simple baseline policies. Exploring more sophisticated agent architectures, such as reinforcement learning agents (e.g., using policy gradient methods, Q-learning) or agents incorporating larger language models for planning and decision-making (similar to the approaches used in Shimabucoro et al. (2024), would substantially strengthen the paper. A systematic comparison of different agent architectures in terms of their effectiveness in improving student models, their sample efficiency, and their computational cost would provide valuable insights and contribute to a better understanding of the challenges and opportunities in automated data generation.\n\nLimited Analysis of Skill Discovery Quality: The paper briefly discusses the impact of oracle skills on student performance but doesn't delve deeply into the quality of the skills discovered by the proposed LLM-based method. A more thorough analysis is needed to understand the strengths and limitations of the skill discovery module. 
This could involve quantitative measures of skill quality, such as measuring their coherence, coverage, and relevance to the target task, or qualitative analysis by human experts. Investigating how the quality of the discovered skills affects the performance of the data generation agents and the resulting student models would strengthen the paper's contribution. Exploring alternative skill discovery methods (e.g., clustering-based approaches, topic modeling) and comparing their effectiveness with the proposed method would further enhance the analysis.\n\nLack of Comparison with Existing Methods: The paper positions DataEnvGym as a novel approach for model improvement, but it lacks a direct comparison with existing methods like curriculum learning (Bengio et al., 2009) or active learning (Settles, 2009). Evaluating how DataEnvGym compares to these established techniques in terms of student model performance, data efficiency, and computational cost would provide valuable context and highlight the advantages of the proposed framework. This would also clarify the specific niche and contribution of DataEnvGym within the broader landscape of model improvement techniques.\n\nLimited Discussion of Scalability: The experiments in the paper are conducted with relatively small datasets and models. It's essential to address the scalability of DataEnvGym to more realistic scenarios involving larger datasets, more complex models, and a broader range of skills. Discussing the computational challenges and potential optimizations for scaling the framework to more demanding settings would strengthen the paper's practical relevance. For instance, how can the computational cost of LLM-based data generation be reduced while maintaining data quality? How can the skill discovery and agent training processes be optimized for larger datasets? Addressing these questions would provide valuable insights for future research and practical applications." 
}, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Novel Problem: Automating data generation to improve models is a significant challenge with practical applications. This work directly addresses this problem with a novel approach.\n\nWell-Defined Framework: DataEnvGym is presented as a well-defined framework with clear components (trainer, evaluator, data generation policy, data generation engine) and different levels of structure (open-ended, skill-list, skill-tree). This structure makes the problem tractable and facilitates modular development and testing.\n\nMultiple Tasks and Domains: The inclusion of experiments across diverse tasks (mathematics, programming, visual question answering) and with different student models demonstrates the generalizability of the framework.\n\nPromising Results: The initial results showing improved student model performance across tasks and environments are encouraging and suggest the potential of this approach. The analysis of difficulty/rarity and training dynamics adds value.\n\nOpen-Source Release: The commitment to publicly releasing the code and leaderboard promotes reproducibility and encourages further research in this area." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces DataEnvGym, a novel testbed of teacher environments for developing data generation agents that iteratively improve student models by generating targeted training data. DataEnvGym frames data generation as a sequential decision-making task where an agent, comprising a data generation policy and engine, interacts with an environment that provides feedback from a student model. The agent's goal is to improve student model performance by generating training data based on student feedback (errors or weak skills). 
DataEnvGym offers multiple instantiations of teacher environments across three levels of structure: open-ended, skill-list, and skill-tree, each with varying levels of scaffolding support. Experiments across text and image-based tasks (mathematics, programming, and visual question answering) demonstrate that example agents within DataEnvGym can iteratively improve student model performance. Furthermore, the authors analyze the impact of state information, environment structure, and skill discovery quality on agent performance and student learning. The paper concludes that DataEnvGym, with its modular design and support for diverse tasks and student models, provides a valuable platform for developing and evaluating data generation agents, engines, and feedback mechanisms for automated model improvement. The code and leaderboard will be publicly released." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Limited Evaluation of Agent Architectures: The focus is primarily on the environment itself, with less emphasis on the architecture and training of the data generation agents. While baseline agents are provided, more sophisticated agent designs (e.g., reinforcement learning agents, agents leveraging larger language models) and their systematic evaluation would significantly strengthen the paper. How do different agent architectures compare in terms of effectiveness and efficiency? Are there specific architectural choices that are particularly well-suited for this task?\n\nOver-Reliance on LLMs for Data Generation: While using LLMs for data generation is a reasonable starting point, it raises concerns about the quality and diversity of the generated data. Exploring alternative data generation methods (e.g., data augmentation techniques, programmatic data generation) and comparing their effectiveness with LLM-based generation would be valuable. 
How robust is the framework to the quality of the generated data?\n\nLimited Analysis of Skill Discovery Quality: While the paper briefly touches upon the impact of skill discovery quality, a more thorough investigation is needed. How does the quality of the discovered skills affect the performance of the data generation agents and the student models? What are the limitations of the current skill discovery method, and how can it be improved? Quantitative analysis of skill quality (e.g., measuring coherence, coverage, and relevance) would strengthen the paper.\n\nLack of Comparison with Existing Methods: While related work on knowledge distillation and model weakness discovery is discussed, there's no direct comparison with existing methods for model improvement. How does DataEnvGym compare to techniques like curriculum learning or active learning in terms of effectiveness and efficiency? Including such comparisons would better contextualize the contributions and highlight the advantages of the proposed approach.\n\nLimited Discussion of Scalability: The experiments are conducted with relatively small datasets and models. How does DataEnvGym scale to larger datasets and more complex models? What are the computational challenges associated with training data generation agents in more realistic settings? Addressing these scalability concerns is crucial for practical applications." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024dataenvgym,\ntitle={DataEnvGym: Data Generation Agents in Teacher Environments with Student Feedback},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=00SnKBGTsz},\nnote={under review}\n}" }, "abstract": { "value": "The process of creating training data to teach models is currently driven by humans, who manually analyze model weaknesses and plan how to create data that improves a student model. Recent approaches using large language models (LLMs) as annotators reduce human annotation effort, but still require humans to interpret feedback from evaluations and control the LLM to produce data the student needs. Automating this labor-intensive process by creating autonomous data generation agents – or teachers – is desirable, but requires environments that can simulate the feedback-driven, iterative, closed loop of data creation. To enable rapid and scalable testing for such agents and their modules, we introduce DataEnvGym, a testbed of teacher environments for data generation agents. DataEnvGym frames data generation as a sequential decision-making task, involving an agent consisting of a data generation policy (which generates a plan for creating training data) and a data generation engine (which transforms the plan into data), inside an environment that provides feedback from a student. The agent’s end goal is to improve student model performance. Students are iteratively trained and evaluated on generated data, with their feedback (in the form of errors or weak skills) being reported to the agent after each iteration. As a general-purpose testbed, DataEnvGym includes multiple instantiations of teacher environments across three levels of structure in the state representation and action space, with varying levels of scaffolding support. 
More structured environments are based on automatically-inferred skills and offer a higher degree of interpretability and control over the curriculum. We support developing and testing data generation agents in three diverse tasks covering both text and images (mathematics, programming, and visual question answering) and test multiple student models. We find that example agents in our teaching environments can iteratively improve students across diverse tasks and settings. Moreover, we show that environments can teach different skill levels and can be used to test variants of key modules, pointing to directions of future work in improving data generation agents, engines, and feedback mechanisms. We will publicly release our code and leaderboard." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "iterative data generation", "llm agent", "lifelong learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/2fabe224ce80b58518b3e21579a58af4d807e6d7.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. 
If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "DataEnvGym: Data Generation Agents in Teacher Environments with Student Feedback" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
00ezkB2iZf
MedFuzz: Exploring the Robustness of Large Language Models in Medical Question Answering
main
Active
large language model;adversarial machine learning;automatic red teaming
foundation or frontier models, including LLMs
3;3;5;6
4;4;5;3
3;2;3;3
2;2;4;3
3;2;3;3
4.25
4
2.75
2.75
2.75
-0.272166
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The authors need to provide further experiments and analyses to demonstrate the reliability of the questions generated by this method, such as incorporating the performance of human experts or introducing relevant methods for quality control of the questions in the methods section.\n\n2. Also, more analysis of the evaluation results should be included. For example, what are the main types of errors introduced by attacks across different turns? Which specific diseases or problem types is the target LLM less robust against? By supplementing these analyses, further insights can be provided for the development of medical LLMs." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "+ This paper examines the robustness of LLMs in the clinical decision-making process, a critical aspect of their application in the medical domain.\n\n+ The evaluation results demonstrate that current LLMs lack robustness in the clinical decision-making process, offering valuable insights for the development of medical LLMs." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the robustness of large language models in handling medical QA tasks by introducing a new evaluation method, MedFuzz. For each multiple-choice question in the original benchmarks, MedFuzz uses an LLM (referred to as the attacker LLM) to reformulate questions by adding patient characteristics that may introduce social bias without affecting the clinical decision-making process. If the target LLM answers correctly, the attacker LLM is prompted to generate additional distracting questions based on the target LLM’s feedback. Additionally, a non-parametric statistical significance test was developed by prompting the attacker LLM to create questions with patient characteristics that avoid social bias. Using this evaluation method, the authors tested seven LLMs and found a significant performance drop across all models. Moreover, they observed that when current LLMs answer incorrectly, they tend not to reference the added biased information, indicating inconsistency in faithfully adhering to the clinical decision-making process." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "+ A major weakness of this paper is the faithfulness of the reformulated questions. The proposed MedFuzz method relies solely on prompt engineering with the attacker LLM (GPT-4) to modify original MedQA questions, making the attack process difficult to control. The attacker LLM may potentially alter critical information in the original questions, resulting in less reliable reformulated questions. The example in Section 3.1 also demonstrates that the attacker LLM added extensive information about the patient’s family medical history, consultation history, and medication history. 
These details are highly relevant in real clinical diagnosis and can significantly influence a doctor’s assessment of the patient’s condition.\n\n+ Moreover, although the authors propose a non-parametric statistical significance test, they do not provide the full distribution of p-values across the MedQA benchmark. In line 485, they note that for the successful attacks they selected, the p-values are <1/30, 0.1, 0.16, 0.5, and 0.63. Here, the p-value represents the probability that a control fuzz is more challenging than the original fuzz. Therefore, cases with p-values of 0.5 and 0.63 suggest that the performance decline in the target LLM is due to the perturbations themselves, rather than social bias.\n\n+ For the study of target LLM's faithfulness, it is important to also study the proportion of CoT that mentions the critical information in the original MedQA benchmark for comparison with the results provided in Figure 2B. Additionally, the authors should provide more information to help readers understand the specific process of this study. For example, how many cases were analyzed? Was the determination of whether fuzzed information was included made manually, or was an automated algorithm used?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "NA. Authors have provided an ethics statement in the draft as well." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "•\tThe authors can clarify how their approach to adversarial attacks differs from the misinformation approach in [1].\n\n•\tThe authors can clarify why unfaithfulness of generated responses is a crucial dimension to consider.\n\n•\tSection 2.2 Lines 104: The authors mention “two ways” in which MedFuzz differs from other adversarial ML approaches, though only one distinction is clear in the draft. I’m assuming the second way is the use of semantically coherent changes to the text. These few lines can probably be rephrased to add clarity.\n\n•\tThe authors have conducted their experiments on the MedQA dataset and taken advantage of a constraint imposed in the curation of this dataset. The authors could potentially add broad guidelines to expand on the fuzzing idea for other medical datasets. \n\n•\tHow can the authors ensure that the GPT-4 generated attack retains the same answer as the original QA pair being perturbed? Is there a possibility to evaluate this with the help of domain experts?\n\n•\tHow is the value of K set in Algorithm 1? This can be elaborated on in the Appendix section.\n\n•\tDoes the finding that LLM CoT does not mention the fuzzed information provide a way forward to identify adversarial inputs?\n\n•\tAnother interesting avenue would be to examine how different kinds of LLMs perform when used as the attacking/ target LLM. For example, can a smaller model generate adversarial inputs faster than a larger model like GPT-4?\n\n•\tMinor Comment: Is line 10 a duplicate of line 11 in Algorithm 1?\n\n[1] Han T, Nebelung S, Khader F, Wang T, Müller-Franzes G, Kuhl C, Försch S, Kleesiek J, Haarburger C, Bressem KK, Kather JN. Medical large language models are susceptible to targeted misinformation attacks. npj Digital Medicine. 
2024 Oct 23;7(1):288." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "•\tClarity: The paper is well written and easy to follow along. The authors have given adequate and clear examples at appropriate locations in the draft to aid readability. Good use of illustrations after consultation with domain experts (clinical collaborators in this case). The authors have also acknowledged the limitation of using contaminated training data.\n\n•\tOriginality: The idea to use social biases a clever way to incorporate real life information into the MedQA dataset.\n\n•\tQuality: The evaluation involves the use of proprietary vs open source and general purpose vs domain specific models. The experiment settings for reproducibility like temperature have been provided. The approach should be easy enough to reproduce. \n\n•\tSignificance: The authors have tackled a relevant problem that needs to be addressed, given the rapid pace of the domain." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes an automated red teaming approach to attack LLMs. They attempt this in the medical context by modifying medical Q&A datasets (specifically on MedQA), by violating assumptions that do not hold good in real life settings. The goal of MedFuzz is to make LLMs provide the wrong answer while ensuring that clinicians can still provide the right answer. The authors have identified a crucial problem with the evaluations of LLMs in the medical domain and provided a way to generate a more realistic dataset to aid subsequent LLM evaluation. The novelty lies in the proposed dataset from MedFuzz and the statistical evaluation used to check if the attack was successful." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "•\tIn the case of MedQA dataset, the authors have identified a social bias which may be present in real life situations, which are removed in the original benchmark. It is unclear how easy it is to identify and exploit such peculiarities in other medical benchmarking datasets like MedMCQA[1], PubMedQA[2] etc.\n\n•\tThe authors create the adversarial questions by an iterative multi-turn approach. Although the authors allude to the target LLM forgetting about previous Q&A attempts, would the approach be better validated if the evaluation is done in a single-turn manner?\n\n•\tThe authors, in step 4, only validate the statistical significance of 4 individual interesting cases. How would this change if considered for all successful cases?\n\n[1] Pal A, Umapathi LK, Sankarasubbu M. Medmcqa: A large-scale multi-subject multi-choice dataset for medical domain question answering. InConference on health, inference, and learning 2022 Apr 6 (pp. 248-260). PMLR.\n\n[2] Jin Q, Dhingra B, Liu Z, Cohen WW, Lu X. Pubmedqa: A dataset for biomedical research question answering. arXiv preprint arXiv:1909.06146. 2019 Sep 13." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* Why was MedQA the only dataset used? 
There are a few other multiple choice medical QA ones liked MedMCQA, PubMedQA, and MMLU Clinical topics. Why MedQA?\n* Why was only GPT-4 used as the attacker LLM? Seemingly there are other open source ones that have just as much medical knowledge especially looking at the fine-tuned example. \n* The workflow for the Step 2 is quite a few iterative turns. Are they all necessary to generate grounded ones? Is this workflow generalizable to other LLMs? Or is it GPT-4 specific?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The idea of the paper is interesting -- existing medical QA datasets are fairly simplified and may not appropriately represent real-world clinical settings. Thus, there is a need to understand how safe LLM usage is for the medical domain via robustness analysis.\n* The intuition for the adversarial biasing comes from medical domain understanding of the benchmark constructions.\n* Authors benchmark 3 closed LLMS and 4 open-source, medically fine-tuned LLMs." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes an adversarial method for evaluating LLM performance on medical question-answering benchmarks to assess their robustness in real-world clinical settings. The idea is to automatically generate new question-answer pairs from the existing benchmark such that they still represent realistic scenarios (e.g., including additional patient information) but the answers remain the same. The experiment results demonstrate that various baseline LLMs can be tricked into providing incorrect answers." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* One of the major claims of the method is that it will generate new questions that are semantically coherent and will not fool clinicians. However, there is no empirical proof that this is the case other than the analysis of a handful of case studies (one is presented in the main text). The prompt contains instructions for the attacker LLM it should not change the default answer but GPT-4 is not always guarenteed to follow the instructions or have all the correct medical knowledge appropriate.\n* Is there a reason why general domain adversarial prompting wasn't shown to be sufficient? A few studies are listed in 2.2 (first sentence) but no preliminary studies or experimental studies are shown to support this.\n* GPT-4 is chosen as the attacker LLM, but the question is why aren't other open-source models explored? In looking at OpenBIOLLM-70B performance, this also looks like a reasonable comparison to try and might even generate harder cases with less of the computation cost.\n* One of the comments in the introduction was the that existing benchmarks are not challenging enough including reducing real-life clinical situations to canonical multiple choice questions. Is there a reason why only one dataset was included and it was a multiple-choice one?\n* The statistical test is proposed to identify the significance of a successful attack using control fuzzes and to select the case studies, but what about the general distribution for the MedQA dataset? How stable is it broadly in identifying how significant a successful attack is? I understand this can be computationally intensive and costly but that also raises a bit of questions regarding the applicability of the method if it can't be done at scale. 
\n* The presentation could have been improved to provide some intuition at the beginning with potentially a simpler case study where less was added to make the LLM response change. Similarly, some of the text is written in a less digestible format. For example, the introduction of the test statistic could be improved by introducing notation first and then how you might compute it to understand what the statistic is looking to capture.\n* The citation format is incorrect, please use \\citep instead of \\cite as it detracts from readability." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "In the MedFuzz study, patient characteristics (PC) such as age, gender, race, and socioeconomic factors are added as perturbations to induce confusion in LLMs. One specific example presented by the authors is the use of “excessive hospital service usage by low-income patients.” This type of information could inadvertently reinforce social biases or perpetuate negative perceptions about certain demographic groups, rather than reflect clinical validity or fairness.\n\nWhen such characteristics are introduced as confusion-inducing factors, there is a risk that essential background information—critical for accurate diagnosis and treatment—could lead to biased outcomes. Therefore, further clarification and evaluation are needed to ensure that MedFuzz’s inclusion of such data as perturbations aligns with clinical relevance and fairness, and to mitigate any potential reinforcement of harmful social biases in the model." 
}, "flag_for_ethics_review": { "value": [ "Yes, Discrimination / bias / fairness concerns" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. It would be helpful to have specific examples illustrating the risks posed by the simplified assumptions in traditional benchmarks within clinical settings. For instance, if omitting certain patient characteristics or clinical contexts could lead to diagnostic errors, providing these examples would clarify the importance of this study for readers and highlight its relevance.\n\n2. I am curious whether the patient characteristics (e.g., age, gender) and social bias information added as perturbations in MedFuzz genuinely act as confusion factors within actual clinical environments. These details often serve as crucial data points in clinical decision-making, so further explanation on how these elements were deemed appropriate as confusion-inducing factors would enhance the clinical validity of this study.\n\n3. A clear explanation regarding the rationale for setting the perturbation iteration count to K=5 would be beneficial. For instance, do you have experimental results comparing the initial attack (K=1) with subsequent attacks (K=5) to illustrate how the LLM maintains performance with increasing perturbation levels? Such a comparison could provide a more reliable basis for evaluating the impact of iteration count on robustness in this study." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. 
This paper introduces MedFuzz, a novel approach for testing the robustness of large language models (LLMs) in clinical contexts, which addresses the simplifications found in traditional benchmarks. MedFuzz is distinct in its approach by adding specific patient characteristics and social bias information to simulate the complexity of real-world clinical scenarios. This innovative framework offers a new direction for assessing LLM robustness by examining potential vulnerabilities in medical question-answering settings.\n\n2. The paper clearly explains the concept of MedFuzz and its application, particularly in using patient characteristics and bias elements to test model robustness. The experimental procedures and components are consistently described, making the study's objectives and methodology easy for readers to follow.\n\n3. MedFuzz presents a significant contribution as it provides a framework to evaluate how LLMs may perform in real clinical settings, beyond simplified benchmarks. This work has high practical relevance for the safe implementation of LLMs in healthcare by strengthening robustness assessment and reducing potential errors. It contributes an essential tool for enhancing LLM applicability in clinical practice, highlighting the importance of robustness in medical AI." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes MedFuzz, a novel approach designed to evaluate the robustness of large language models (LLMs) in medical question-answering contexts. MedFuzz introduces controlled perturbations in input text by adding patient characteristics (PC) and social bias information to simulate real-world variability and challenges encountered in clinical settings.\n\nThe authors highlight the limitations of traditional medical benchmarks that often simplify clinical scenarios and position MedFuzz as an advancement towards “beyond-the-benchmark” evaluations. 
Specifically, the paper presents experiments assessing LLMs' responses to MedFuzz perturbations and evaluates the consistency of chain-of-thought (CoT) explanations under these conditions. The study offers a new perspective on testing LLM robustness by addressing potential risks in clinical decision-making when assumptions of canonical benchmarks do not hold." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper defines robustness as the model’s ability to maintain performance in varied scenarios, which may lead to confusion with the concept of “generalization.” Typically, robustness refers to a model's resilience to perturbations or intentional adversarial attacks. To clarify the core aim of this study, a more explicit definition of robustness in the context of MedFuzz is recommended, particularly regarding how MedFuzz is designed to evaluate LLM robustness beyond generalization. Explaining how robustness is measured and differentiated from generalization could provide readers with a clearer understanding of the intended contribution.\n2. MedFuzz incorporates specific patient characteristics (e.g., age, gender, race, family history, background) as perturbations to assess LLM robustness; however, this approach may not accurately reflect clinical settings. Patient background information typically aids diagnostic decisions rather than introducing confusion. For instance, a patient’s age or medical history often plays a crucial role in diagnosis and would rarely be considered extraneous. Thus, further justification on why these characteristics are appropriate for simulating robustness under MedFuzz is recommended. Clarifying which patient data might clinically support decisions versus truly confuse the model would strengthen the study’s validity.\n3. The scale of text modification applied in MedFuzz risks excessive deviation from the original context, potentially impacting the robustness assessment. 
In section 3.1, for instance, added text can exceed 40% of the original passage, potentially leading to unintentional confusion beyond MedFuzz’s intended perturbation. A more focused perturbation approach—such as limiting changes to key sentences or reducing the proportion of added text—could provide a more accurate robustness assessment. This adjustment would align MedFuzz’s modifications closer to realistic conditions while still effectively evaluating LLM robustness.\n4. After applying MedFuzz, the Chain-of-Thought (CoT) explanations produced by the LLM were noted to omit important information, suggesting reduced fidelity. However, it is unclear whether this reduction in fidelity is due to MedFuzz’s perturbations or the LLM’s inherent limitations. It is recommended to first assess the fidelity and consistency of CoT explanations on the original benchmark without MedFuzz to identify the root cause of CoT discrepancies. Such an analysis would clarify whether the fidelity issues stem from MedFuzz or from the model itself, providing clearer insights into the reliability of the CoT explanations in real-world scenarios." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "An automatic redteaming method for testing the robustness of LLMs in medical question answering" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024medfuzz,\ntitle={MedFuzz: Exploring the Robustness of Large Language Models in Medical Question Answering},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=00ezkB2iZf},\nnote={under review}\n}" }, "abstract": { "value": "Large language models (LLM) have achieved impressive performance on medical question-answering benchmarks. However, high benchmark accuracy does not imply robust performance in real-world clinical settings. 
Medical question-answering benchmarks rely on assumptions consistent with quantifying LLM performance but that may not hold in the open world of the clinic. Yet LLMs learn broad knowledge that could help the LLM perform in practical conditions regardless of unrealistic assumptions in celebrated benchmarks. We seek to quantify how robust LLM medical question-answering benchmark performance is to violations of unrealistic benchmark assumptions. Specifically, we present an adversarial method that we call MedFuzz (for medical fuzzing). MedFuzz attempts to modify benchmark questions in ways aimed at confounding the LLM. We demonstrate the approach by targeting unrealistic assumptions about patient characteristics presented in the MedQA benchmark. Successful \"attacks\" modify a benchmark item in ways that would be unlikely to fool a medical expert but nonetheless \"trick\" the LLM into changing from a correct to an incorrect answer. Further, we present a non-parametric test for calculating the statistic significance of a successful attack. We show how to use calculate \"MedFuzzed\" performance on a medical QA benchmark, as well to find individual cases of statistically significant successful attacks. The methods show promise at providing insights into the ability of an LLM to operate robustly in more realistic settings." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "large language model", "adversarial machine learning", "automatic red teaming" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/9086aab30bbc4180cbbf3c113e82c12eecdff119.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "MedFuzz: Exploring the Robustness of Large Language Models in Medical Question Answering" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
01wMplF8TL
INSTRUCTION-FOLLOWING LLMS FOR TIME SERIES PREDICTION: A TWO-STAGE MULTIMODAL APPROACH
main
Active
Large Language Models;Time-series Prediction;Multi-modal;Instruction-following
learning on time series and dynamical systems
3;5;5;5
3;4;3;3
2;2;2;3
2;2;3;3
1;3;3;2
4.5
3.25
2.25
2.5
2.25
0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. For table 4, can you provide the same results, but for your model instead of only for TimeLLM? It would make it more obvious whether your model succeed on those tasks with incorrect textual information.\n2. For real world dataset, was the textual information always constant (as shown in section B.3) for each dataset? This would allow a finetuned model to fully ignore it, since it could bake said information in its weights anyway." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. It is good that zero shot examples of descriptions which have not been provided in the training set have been tested with. Without those, the narrow set of possible descriptions could have made it impossible to check whether the result quality came from the model overfitting on these descriptions or not.\n2. Training the model using generated data and computing how well the model follows the instructions is a relatively clean way to do a proof of concept of the idea, which is appropriate currently, as the field of using LLM and timeseries models together is still in its infancy." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The article describe a new model to incorporate textual information with a more traditional timeseries forecasting model. It does so by combining an embedding computed from the historical numerical data with an embedding computing from the textual information. The combined embedding is then used to generate the forecast.\n\nThe model is tested both on real-world data, where it shows competitive results, and on generated data, where it is shown to follow the instructions included in the textual information." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There seems to be a mismatch between the described technique used to apply the modification (equation 3), and the examples shown (figure 3). According to the equation, the data in the forecast window should be a pure affine function, without any of the noise shown in figure 3.\n2. While the model is tested against other multimodal text+timeseries models, it should also be tested against pure LLM approaches: just plugging the text and the history in a prompt for GPT 4 or LLama 3, and looking at the generated output. While such an approach won't scale to long series, recent work have shown it to be surprisingly decent at forecasting under textual instructions. See: LLM Processes by Requiema 2024 for a slightly more complex approach, but there may be more appropriate references for the more direct one.\n3. Hyperparameters and training curiculum for the timeseries portion of the model are missing." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- **How would the proposed model perform without access to textual inputs or under noisy conditions?** If textual instructions are incomplete, inconsistent, or contain noise, how would the model's performance be affected? This scenario is particularly relevant in high-stakes areas like finance, where decision-making often involves dealing with imperfect information. What measures have been taken to ensure robustness against these issues, which are common in real-world data?\n- **How does the proposed framework address interpretability in practice?** The paper claims that incorporating textual instructions enhances interpretability, but there are no concrete demonstrations of how this contributes to meaningful insights for domain experts. Could you provide explicit examples or user studies that validate this claim? Without such evidence, how can the claim of improved interpretability be substantiated?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- A novel two-stage framework for integrating temporal and textual data.\n- A data generation workflow for instruction-based forecasting, compatible with LLMs.\n- Comprehensive ablation studies and comparative evaluations demonstrating the effectiveness of TITSP." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents Text-Informed Time Series Prediction (TITSP), a multimodal framework that integrates textual context with time series data using Large Language Models (LLMs). The approach involves two stages: AutoPrompter, which aligns time series data with text embeddings, and a refinement stage that incorporates task-specific textual instructions to enhance prediction accuracy and interpretability. While TITSP proves particularly effective for context-rich forecasting tasks, by demonstrating improved performance under specific settings against some other methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **Technical Contributions are Incremental** The proposed approach lacks significant technical innovation. Integrating LLMs with time series is an incremental step rather than a groundbreaking contribution. The use of cross-attention and VQ-VAE offers no substantial improvement beyond established techniques.\n- **Poor Structure and Clarity** The paper is poorly organized, with unclear explanations and an incoherent flow. 
The motivation and rationale for the proposed method are inadequately communicated, and critical components like AutoPrompter are explained in a convoluted manner, hindering comprehension.\n- **Inadequate Experiments** Experimental validation is weak, relying heavily on synthetic datasets that limit the assessment of practical applicability. Comparisons to related state-of-the-art methods are lacking, and statistical significance testing is absent, making it difficult to validate the performance claims.\n- **Superficial Related Work** The related work section lacks depth and fails to properly differentiate the contribution from prior research. Key works are missing or insufficiently discussed, weakening the justification for originality.\n- **Numerous Typos and Lack of Polish** Frequent typos (e.g. citation mistaches in line 54-55), poorly formatted figures(fig. 6), and poorly constructed tables suggest a lack of careful proofreading, which detracts from the overall quality and credibility of the paper.\n- **Insufficient Practical Insights** The claimed interpretability through textual integration lacks demonstration. There are no real-world examples showing how domain experts would benefit from these insights, making the practical value of TITSP unclear." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "The paper does not raise any significant ethical concerns." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper presents a novel approach to time series forecasting by integrating textual instructions, which is a creative extension of existing multimodal time series models. The introduction of a two-stage framework and the focus on instruction-based forecasting address a significant gap in the field.\n2. The paper is well-written and logically organized. The figures and tables are clear and effectively support the text. The problem formulation and the description of the methodology are easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Text-Informed Time Series Prediction (TITSP), a novel two-stage framework that enhances time series forecasting by integrating domain-specific textual information. The paper demonstrates that TITSP significantly outperforms traditional and existing multimodal approaches, improving both predictive accuracy and interpretability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Given the synthetic data generation process, how can the authors ensure that there is no data leakage between the text data and forecasting targets? Could the authors provide a detailed explanation of the data generation process to address this concern?\n2. How practical is the proposed approach in real-world scenarios where textual instructions may not always be available or may be ambiguous? 
Could the authors discuss the potential limitations and challenges in deploying TITSP in practical applications?\n3. Has the model been tested on any other multimodal time series analysis tasks beyond forecasting? If not, what are the potential challenges in applying TITSP to other tasks?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Questions:\n1. The choice of order compliance rate as an evaluation metric is intriguing. This metric appears specifically tailored to the instructions outlined in the paper, which may limit its applicability to real-world scenarios. Could you clarify the advantages this metric offers over existing metrics for evaluating forecasting performance?\n\nSuggestions:\n\n- Benchmark results against a broader selection of existing multimodal forecasting models to enhance comparative insights.\n- Include a detailed discussion of the dataset, covering aspects such as sample size, history length, and forecasting horizon.\n- If feasible, incorporate more complex textual cues in the experiments to better reflect real-world forecasting challenges." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The strengths include the relevance of the problem of text-aided forecasting and the novelty of the prompting method. The methodology section is comprehensive and well-described, and the techniques and experiments have been explained in detail and are easy to follow. The figures convey the overall idea and highlight the improvements over the no-instruction setup." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a novel two-stage for multimodal forecasting through historical data and textual cues that are useful for LLM-based forecasters. The multimodal framework is evaluated on numerous multimodal forecasting tasks. The paper provides a setup to include expert opinions for a forecasting problem." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The primary weaknesses of the paper are as follows:\n\n1. **Incomplete Literature Coverage**: Section 2.2 does not fully address relevant multimodal forecasting models, omitting key references such as UniTime ([https://dl.acm.org/doi/10.1145/3589334.3645434](https://dl.acm.org/doi/10.1145/3589334.3645434)).\n\n2. **Limited Comparative Analysis**: The results lack sufficient comparison with other multimodal forecasting models, reducing insight into how the proposed method performs relative to similar approaches.\n\n3. **Insufficient Dataset Description**: Essential dataset details, including sample counts, history length, and forecasting horizon, are not provided. Additionally, the impact of the forecasting horizon on prediction quality remains underexplored.\n\n4. **Simplistic Experimental Instructions**: The experimental instructions are overly simplistic, failing to reflect realistic scenarios. 
The limited set of training instructions may also suggest that simpler alternatives for instruction embedding could have been more effective.\n\n5. **Circular Evaluation**: The evaluation datasets have been tailored from existing datasets based on the training instructions intended for evaluation, which creates a circular reasoning issue that undermines the reliability of the evaluation setup. A similar statement about the order compliance rate metric can also be made.\n\n**Minor Issues:**\n\n1. The paper inconsistently uses closing quotes (\") instead of opening quotes (``) in multiple locations, including but not limited to lines 197, 203, and 213.\n\n2. Textual citations, rather than parenthetical citations, would be more suitable for the references in lines 117 to 128, enhancing the readability and flow of the text.\n\n3. Appropriate citations are not provided for the original dataset sources." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose TITSP, a multimodal framework that integrates textual knowledge with time series data using LLMs, significantly enhancing prediction accuracy and interpretability." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024instructionfollowing,\ntitle={{INSTRUCTION}-{FOLLOWING} {LLMS} {FOR} {TIME} {SERIES} {PREDICTION}: A {TWO}-{STAGE} {MULTIMODAL} {APPROACH}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=01wMplF8TL},\nnote={under review}\n}" }, "abstract": { "value": "We introduce Text-Informed Time Series Prediction (TITSP), an innovative multimodal framework that integrates textual knowledge with temporal dynamics using Large Language Models (LLMs). 
TITSP employs a two-stage process that bridges numerical data with rich contextual information for enhanced forecasting accuracy and interpretability. In the first stage, we present AutoPrompter, which captures temporal dependencies from time series data and aligns them with semantically meaningful text embeddings. In the second stage, these aligned embeddings are refined by incorporating task-specific textual instructions through LLM. We evaluate TITSP on several multimodal time series prediction tasks, demonstrating substantial improvements over state-of-the-art baselines. Quantitative results reveal significant gains in predictive performance, while qualitative analyses show that textual context enhances interpretability and actionable insights. Our findings indicate that integrating multimodal inputs not only improves prediction accuracy but also fosters more intuitive, user-centered forecasting" }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large Language Models", "Time-series Prediction", "Multi-modal", "Instruction-following" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review."
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/448e8a13abf683caa4fdc433d298a04dcb59bbe8.pdf" }, "presentation": null, "primary_area": { "value": "learning on time series and dynamical systems" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/4e0c464af7a349b9a73543bcd65624333bc923af.zip" }, "title": { "value": "INSTRUCTION-FOLLOWING LLMS FOR TIME SERIES PREDICTION: A TWO-STAGE MULTIMODAL APPROACH" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
029hDSVoXK
Dynamic Neural Fortresses: An Adaptive Shield for Model Extraction Defense
main
Active
Model Extraction Defense
alignment, fairness, safety, privacy, and societal considerations
1;5;5;6;8
4;3;3;3;4
2;3;3;3;2
2;3;2;3;4
2;2;2;3;3
5
3.4
2.6
2.8
2.4
-0.179029
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- The authors claim their approach falls under the model extraction prevention defense category. Still, it works like a detection approach where the OOD detector is built into the model itself and thus relies heavily on the OOD data used for classification. The results shared by authors, to argue otherwise, are insufficient. I would ask the authors to include more experiments for this argument. \n- If the model is trained to early exit in the case of OOD samples, but the labels used are from the original neural network (essentially the last possible exit), what is the accuracy of the model on OOD data used for training the model? I suspect that the early exit model misclassifies OOD data with high confidence. If it were learning the original network’s output labels for OOD data, then the defense would not work for the hard-label setting as the attacker would still receive a large portion of the original network’s labels as output with some erroneous ones.\n- Regarding the exit point evaluation ablation study, I would like to know the accuracy at each exit and the exact number of ID and OOD samples passing through each exit instead of terms such as “over half,” etc." 
}, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The proposed idea of implementing early exits as a defense against model extraction is novel and sound.\n- The method is easily adaptable to different architectures like ResNets and ViTs. \n- The use of entropy and information bottleneck theory is sound and well-suited to the goal of reducing extractable information for the attacker.\n- The experiments conducted cover various scenarios, models and datasets validating its generalizability. The performance comparisons with state-of-the-art defenses further strengthen its credibility. \n- The ablation study is thorough and captures various scenarios that highlight the effectiveness of the proposed method and its components." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents “Dynamic Neural Fortress” or DNF framework as a defense against Model Extraction Attacks. These attacks allow an adversary to create a copy of a pre-trained model accessible via black-box APIs, posing risks to proprietary models. The authors identify two main challenges in current defenses: (1) Neural Network architecture protection, a thing that is taken for granted in previously proposed attacks by using the same model architecture for victim and clone models, and (2) optimizing computational resources by avoiding allocation of equal resources to both benign and attack queries. \n\nThe authors implement an Early-Exit neural network wrapper (EENN) on top of a trained model. This wrapper facilitates random exits at earlier layers for attack queries while preserving model utility by making benign queries exit at later layers. 
The authors assume the usage of out-of-distribution (OOD) data by attackers in most cases, but there are some experiments conducted for in-distribution (ID) data as well. Using concepts from deep information bottleneck theory, the authors optimize mutual information between input data, latent features, and output labels for training the EENN model. \n\nThe proposed method has been evaluated via testing on various architectures and datasets, and compared against other state of the art defenses." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper presents a technically sound idea, but the presentation is poor and needs major revisions. I am listing the weaknesses sectionwise. \n### Related work:\n- The related work is not organized properly, and some works are not cited in their appropriate sections, although they are cited later in the paper. For example, ActiveThief by Pal et al. (2020) [1] should be present under functionality stealing. \n- When a model extraction attack is data-based, the data might be natural or synthetic. For E.g., I can generate a dataset of 10,000 images from a pretrained generative network and use that for model extraction. This would still fall under the category of data-based model extraction. Data-free model extraction means that the data used for stealing is generated based on some information received from the victim. \n- Therefore, restructuring the related work section is necessary here. \n\n### Methodology:\n- The steps followed to convert a pre-trained victim model into an EENN are not easily followed. A network is trained on the ID data first. Then exit classifiers are added on top of it. Then, an OOD generator is used to generate OOD data, which is then passed through the original network without the exit networks for inference. The steps followed after this are not written in a coherent manner. 
One has to go through Algorithm 1 to get a clear picture of the training process.\n- Overuse of the term specific to start two consecutive paragraphs (224-235 and 236-241) and even inside the paragraphs when the sentences contained in both paragraphs are not specific at all. \n\n### Experimentation:\n- The authors should differentiate between the DFME and DBME settings in more detail. In line 387, it is assumed that the reader will know that they are talking about the DFME setting instead of the soft-label setting. This also invites confusion regarding the budget difference between the soft and hard label settings, where the budget should be the same for valid comparison. \n- For the DFME setting, one clone model architecture should be the same as the victim model for valid comparison (Resnet-34 in this case). Previous methods, like the prediction poisoning [2] method used by authors for comparison, have conducted experiments that keep the victim architecture for the stolen model. Moreover, the proposed method is not better than MeCo for the CIFAR-10 dataset. This should be analyzed and discussed.\n- For the DBME setting, using the random strategy for sampling images is not ideal. It has been shown in the ActiveThief [1] paper that using an uncertainty-based sampling method is more effective. \n- To showcase the effectiveness of the in-distribution defense, using JBDA as the attack strategy is fairly obsolete, and the paper cited needs to be corrected. The paper that proposed the attack is [3]. The authors should use either ActiveThief or Knockoff nets attack for evaluation as they are more recent and utilize intelligent sampling-based strategies for attack. If an actual attacker has access to in-distribution data, they will try to use the best strategy possible. \n- To demonstrate the defense’s effectiveness against model architecture stealing, the authors pick the latest attack by Carlini et al. 
but fail to show effectiveness against previously cited work, specifically “Towards reverse-engineering black-box neural networks. In International Conference on Learning Representations, 2018.” that perform attack on imagenet models. Considering that this was one of the major claims made by the authors, they should evaluate this aspect thoroughly. \n\n\n### Grammar:\nThe paper has incoherent paragraphs, spelling mistakes, and redundant sentences. Some of them are listed below:\n- Line 225, it should be “convert” instead of “covert.”\n- In Table 1 and Table 2, the spelling of label is incorrect. \n- Appendix D, Lines 778-779, same line repeated twice. \n\nCitations:\n- [1] Pal, Soham, et al. “Activethief: Model extraction using active learning and unannotated public data.” Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 34. No. 01. 2020.\n- [2] Orekondy, Tribhuvanesh, Bernt Schiele, and Mario Fritz. “Prediction poisoning: Towards defenses against dnn model stealing attacks.” arXiv preprint arXiv:1906.10908 (2019).\n- [3] Papernot, Nicolas, et al. “Practical black-box attacks against machine learning.” Proceedings of the 2017 ACM on Asia conference on computer and communications security. 2017." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Can you provide a formal definition or description of in-distribution and out-distribution data in this paper's setting? How to distinguish the normal user data (OOD) and attack data (OOD)?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "+ Good motivation. The authors adopt multi-exit architecture to defend architecture extraction attack, which is a well motivated and interesting idea.\n+ Extensive evaluation. The authors not only evaluate the defense effectiveness but also adaptive attacks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a new defense against model extraction attack for model architecture and model utility. The key idea is to use multi-exit neural network architecture and its random exit mechanism to protect the network's architecture while ensuring the efficiency. For benign queries, the authors trains the early-exit model to distinguish OOD data (attack queries) and in-distribution data to ensure the model utility.\nFinally, the authors show that DNF outperforms previous defenses and evaluate the adaptive attacks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The assumption of attack data are OOD data, although widely adopted in prior work, should be more carefully justified. Meanwhile, as the model's training data are unknown to the user, benign queries may also be OOD data. DNF might decrease the model utility in this case.\n- The main part of paper (Section 4) is somehow hard to follow. 
I would suggest the author to simplify the notations or subscripts. Moreover, I also suggest the authors to provide an overview figure to replace some descriptions.\n- Although the authors investigate the adaptive attacks, the adversary can still design more powerful attack by exploiting the multi-exit model. Please discuss more about the potential vulnerability of multi-exit architecture and compare with prior attacks on multi-exit networks.\n\n[1] Auditing Membership Leakages of Multi-Exit Networks. ACM CCS 2022.\n\n[2] Model Stealing Attack against Multi-Exit Networks. arXiv:2305.13584.\n\n[3] Mind your heart: Stealthy backdoor attack on dynamic deep neural network in edge computing. IEEE INFOCOM 2023.\n\n[4] Aegis: Mitigating Targeted Bit-flip Attacks against Deep Neural Networks. Usenix Security 2023.\n\n[5] Prediction Privacy in Distributed Multi-Exit Neural Networks: Vulnerabilities and Solutions. ACM CCS 2023." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Can the proposed defense be easily extended to other tasks and domains, such as object detection and NLP applications?\n\n* Does the number of exit points impact the performance of the proposed defense?\n\n* According to the design, earlier blocks are intended to reduce the model's predictive capability. However, it is notable that the ID dataset maintains high accuracy even after exiting at Exit2. 
This raises questions about the effectiveness of the defense mechanism. Moreover, the OOD dataset still retains 35% of its data after passing through the last two blocks. What is the observed defense effect in this case?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The first defense framework simultaneously offers three key protective benefits: protecting the functionality, and model architecture, while improving the efficiency of the inference.\n\n* An innovative design of the loss function is achieved by incorporating the Information Bottleneck (IB) theory.\n\n* The experimental design is well-structured and covers various scenarios, effectively validating the method's effectiveness." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The dynamic neural fortress (DNF) defense method introduced in this paper employs a dynamic early exit neural network to defend model extraction attacks. This approach effectively provides simultaneous protection for model functionality, network architecture, and enhanced defense efficiency against these threats. Extensive experiments demonstrate that the proposed defense method outperforms SOTA model extraction defenses in terms of both effectiveness and efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The claims regarding the protection of model architecture are overstated. Early Exit (EE) mechanisms indeed prevent attackers from executing the entire pipeline of DNN, therefore protecting the entire model architecture information from being leaked. However, the authors fail to provide how attackers might exploit this vulnerability to steal the model architecture when executing the entire network. 
Furthermore, EE mechanisms typically occur in the last few layers of DNNs; therefore, while the proposed approach may protect certain layers, it only works for those that are unexecuted, leaving the majority of the neural network vulnerable (if there are effective attacks that can steal the model architecture). The authors should consider discussing these limitations in a dedicated section titled \"Limitations.\"\n\n* The definitions of out-of-distribution (OOD) and in-distribution (ID) data lack clarity. It is unclear why the authors consider OOD data to be \"illegal\" while ID data is deemed \"legal\", and the rationale behind the corresponding loss term needs further explanation. Additionally, the authors aim to minimize the mutual information between $X_{id}$ and $Z_{id}$ in Eq. (3). However, this approach could potentially compromise the overall performance of deep neural networks (DNNs). The authors should provide additional clarification on why a reduced mutual information between $X_{id}$ and $Z_{id}$ does not impact the prediction accuracy.\n\n* Table 12 indicates that queries drawn from ID dataset exit at Exit2 over 90%, while the OOD queries only exit at about 75% at the same stage. This discrepancy seems inconsistent with the motivation behind two loss terms in Eq. (3) and Eq. (4). The authors should explain this discrepancy and discuss how it impacts the effectiveness of the proposed defense mechanism. 
I would like to suggest the authors provide a more detailed analysis of the exit patterns for ID vs OOD data.\n\n* The explanation for choosing a specific mutual information optimization method to achieve the defense objectives lacks a deeper theoretical explanation and intuitive justification, making it challenging to fully follow the principles behind the proposed method.\n\n* The experiments conducted to protect the model architecture appear limited, which does not sufficiently demonstrate the contribution related to model architecture protection mentioned in the paper. Consider adding additional experiments and evaluation metrics specifically designed to assess the robustness of the model architecture against potential theft. \n\n* It would be advantageous to include experiments that investigate the correlation between accuracy and exit points, providing a clearer visualization of the early exit mechanism's impact. I would like to suggest a graph showing accuracy vs. exit points for both ID and OOD data or report a statistical analysis of this relationship.\n\n* It seems that all datasets utilized are classification datasets, which makes it difficult to validate the effectiveness of the proposed method in other tasks and domains.\n\n* The notations in this article have been used repetitively, e.g., $r$." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. 
Concepts related to entropy and IB regularization are presented with some mathematical rigor and learning objectives for both ID and OOD data are presented with entropy and IB regularization contratints; However some additional insights into potential limitations are necessary – How would the strategy perform under adaptive attacks with a much varied and increasingly sophisticated OOD spectrum? And how it would impact models that aim for domain generalizability and to incorporate that OOD spectrum into their model’s capabilities?\n2. How does this defensive method translate to multi-modal architectures like VLMs. Or multi-pipeline methods where each branch operates on different modalities? Or ML methods where different models are trained for different modalities and their outputs are combined (via some aggregation)?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper presents an interesting defenseive method to counter model extraction attacks. The paper’s novelty lies in the core idea of using a dynamic exit strategy based on the input query. While early exit strategies have been explored in the context of neural networks, their application to defensive methods is novel.\n2. The paper is well written, and the core idea is simple to understand. The language is lucid but see weakness 2, 3.\n3. The paper is well organized with a clear progression between sections. Figure 1 greatly aids clarity in trying to understand the pipeline, however see weakness 2.\n4. Experimental evaluation is robust and does seem to support the author’s claims that DNF achieve substantial reduction is successful model cloning.\n5. This paper addresses a growing concern in the space of AI/ML model deployment – protecting against model cloning and privacy and intellectual rights protection. 
This work does have the potential to help drive forward work in defense space for these attack types." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Model extraction is a type of attack where an attacker tries to replicate a victim model to either:\n1. Estimate the model’s parameters to emulate the model’s performance\n2. Copy the model’s architecture, to recreate the model as-is.\n3. Get protected knowledge of the training data of the victim model, to better understand the data distribution it was trained on, so that other type of adversarial attacks can be done.\n\nExisting defense strategies are costly – they do not differentiate between benign and malicious queries from an attacker and this form of defense allocates the same computational power to both. This paper provides a novel way to tackle model extraction attacks – Dynamic Neural Fortresses. \n\nThey propose an early-exit strategy wherein the victim model has built-in early exits routes that the model can take and provide outputs that are OOD from it’s expected input-output combination. If an input query matches an early-exits threshold, the model inference exits with the output at that stage." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Despite strength 5, this method can be adapted widely only after these weaknesses are addressed and questions explored.\n2. Should make better use to visual elements – probably, atleast in the appendix, add an example of what an attack query would look like, why the victim system would classify the query as attack and what the victim model’s behaviour would be on it, how early would it exit?\n3. Math is useful and helps to aid the reader’s understanding but at times also hampers readability. Especially in textual sections it breaks the flow of readers. 
Something that may help is condense the math and limit them to equations that can be repeatedly referenced or have a table of symbol notations that readers can refer to.\n4. Some sections could use with clearer explanations - OOD Data Learning Objective, underlying theory for Entropy and IB regularization. Maybe providing examples around mutual information or ER could help.\n5. The paper does provide some explanation about Entropy and IB regularization but could expand a little more on how mutual information reduction leads to lower predictability and can be leveraged for distinguishing between benign and malignant queries.\n6. Maybe a comparison with other information-theory based approaches such as standard adversarial training would help drive home the imminent advantages on DNF. Another set of comparisons that could strengthen the paper’s results are against other dynamic architectures (example ‘BranchyNet’).\n7. The paper uses ER to determine optimal exits from the model’s inference. However the choice of thresholds is only briefly discussed. Maybe an ablation study of various hyperparameters, exit thresholds and entropy weights could help explain the choice a certain threshold or explain the assumptions that the authors may have made." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could you please estimate the impact of early exiting for IID samples? 
For instance, you might compute the misalignment in model outputs for IID samples when they exit early with respect to being forwarded into the entire network.\n- Could you please evaluate the defense against a worst-case attacker, enhancing the already implemented adaptive attacks with (partial) knowledge of the training data distribution?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper presents a clearly novel idea to address a very relevant issue. Indeed, to the best of my knowledge, this is the first application of a multi-exit neural network to defend against model extraction attacks.\n- The proposed network architecture can also reduce the inference time during deployment.\n- The approach is very intuitive and well-justified.\n- The reported results are promising." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, a defense against model stealing attacks (targeting either the model architecture or its functionality) based on a multi-exit neural network is proposed. The main idea is to output accurate prediction scores for ID data from the later network exits, as well as uninformative scores for OOD data from the earlier exits. To do so, for each network exit, a thresholded classifier is trained on the respective intermediate layer representation with a specifically designed loss, which maximizes the aforementioned objective using concepts from information theory. During the deployment, an exit is chosen for a sample when the maximum score of an exit classifier exceeds the respective threshold." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- 90% of IID samples exit in the first 3 exits. 
Although this can be viewed as a benefit (it reduces the inference time), on the other side, the defense mechanism will produce less informative outputs for those samples. The impacts of these effects should be clearly understood.\n- I appreciate the fact that the authors consider different types of attacks and try to implement adaptive ones. However, a best practice when dealing with security is to simulate a worst-case scenario against the strongest attack. This helps understand the limitations of the defense and estimate lower bounds of robustness in these settings - even if, in practice, they are unlikely to occur. In this case, the adaptive attacks should be implemented using model extraction techniques that rely on some knowledge about the training data distribution. This assumption is not too unrealistic, as it might happen that the attacker (who knows the domain on which the model is applied) is able to gather in-distribution data from public domains - for instance, if the model is a malware detector, it should be very easy to collect samples and also very likely to have some overlap between them and the training data used by the victim. In other cases, the attacker might possess a subset of or all the training data, and she could easily train its own model, but she is rather interested in reproducing the exact model functionality and reproducing its decision boundaries to build a surrogate model and use it for other attacks (like evasion ones, aka adversarial examples)." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024dynamic,\ntitle={Dynamic Neural Fortresses: An Adaptive Shield for Model Extraction Defense},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=029hDSVoXK},\nnote={under review}\n}" }, "abstract": { "value": "Model extraction aims to acquire a pre-trained black-box model concealed behind a black-box API. \nExisting defense strategies against model extraction primarily concentrate on preventing the unauthorized extraction of API functionality. However, two significant challenges still need to be solved: (i) Neural network architecture of the API constitutes a form of intellectual property that also requires protection; (ii) The current practice of allocating the same network architecture to both attack and benign queries results in substantial resource wastage. To address these challenges, we propose a novel \\textit{Dynamic Neural Fortresses} (DNF) defense method, employing a dynamic Early-Exit neural network, deviating from the conventional fixed architecture. Firstly, we facilitate the random exit of attack queries from the network at earlier layers. This strategic exit point selection significantly reduces the computational cost for attack queries. Furthermore, the random exit of attack queries from earlier layers introduces increased uncertainty for attackers attempting to discern the exact architecture, thereby enhancing architectural protection. On the contrary, we aim to facilitate benign queries to exit at later layers, preserving model utility, as these layers typically yield meaningful information. 
\nExtensive experiments on defending against various model extraction scenarios and datasets demonstrate the effectiveness of DNF, achieving a notable 2$\\times$ improvement in efficiency and an impressive reduction of up to 12\\% in clone model accuracy compared to SOTA defense methods. Additionally, DNF provides strong protection against neural architecture theft, effectively safeguarding network architecture from being stolen." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Model Extraction Defense" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/bf422ad6c14f7dc2ca3d5a9bb6f184542a4a40f2.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Dynamic Neural Fortresses: An Adaptive Shield for Model Extraction Defense" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
02DCEU6vSU
Gen-LRA: Towards a Principled Membership Inference Attack for Generative Models
main
Active
Privacy;Membership Inference Attacks;Generative Models
alignment, fairness, safety, privacy, and societal considerations
3;3;5;5;8
4;4;4;3;4
2;2;2;3;3
2;2;3;2;3
3;3;3;3;3
4.8
3.8
2.4
2.4
3
-0.054554
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Although I could follow the gist of the idea, some of the notation is not precisely defined. $p_{\\mathbb{P} \\cup x*}$. It might be clearer to skip Eq.s 3/4 and jump to Eq 5.\n1. Do you have any ideas for how to generalize this to forms of data that are not amenable to KDEs (even after applying PCA)?\n1. Section 5.3 is not clear to me. What exactly is the experiment here, and what is it supposed to demonstrate?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The idea of performing MIA on a generative model by using likelihood ratio of generated data between models with and without the targeted example is very natural and efficient. I'm not surprised that it is very effective, as demonstrated in the experiments. The paper is mostly well-written and well-motivated, and to my knowledge original." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper describes a membership inference attack on generative models. It requires a set of examples generated by the model, S, and a set of reference examples, R, presumably from the same distribution as the data the model was trained on. 
Then to guess whether some new point x* was part of the training data, it estimates the likelihood ratio of S between a model trained on R vs. a model trained on $R \\cup \\{x*\\}$ using two kernel density estimators. It then thresholds on the likelihood ratio. Experimental results demonstrate impressive improvements compared to baseline models, particularly when evaluated with the critical \"true positive rate at low false positive rate\" metric." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I'm afraid the specific approach of using kernel density estimators will limit the method's applicability to low-dimensional tabular datasets. I would love to see this idea generalized to higher-dimensional data, probably using something that will scale better than KDEs." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.The manuscript lacks a clear explanation of the practical utility of applying MIA to synthetic data. It remains unclear why synthetic data was chosen as the focus, rather than real-world or other benchmark datasets. The authors are encouraged to provide references in the Related Work section to strengthen the justification for studying synthetic data specifically. 
Expounding on the unique relevance of synthetic data to MIA would better demonstrate the necessity and contributions of this study.\n2.Several typographical errors and repeated references are present in the reference section, such as on Line 527 and Line 729. A thorough review of the references is recommended to ensure accuracy and consistency across all citations." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper introduces the Generative Likelihood Ratio Attack (Gen-LRA), a novel membership inference attack specifically aimed at detecting privacy leakage due to overfitting in generative models. Unlike prior methods, Gen-LRA employs a likelihood ratio-based hypothesis testing approach to infer membership without requiring extensive knowledge of the model structure or parameters. By leveraging density estimation techniques, the authors assess whether synthetic data generated by a model is overfitting to specific training data points, particularly in regions with outliers. The authors demonstrate that Gen-LRA significantly outperforms existing MIA methods across various generative architectures and datasets, with particular success in scenarios with low false positive rates, highlighting the nuanced privacy risks associated with generative models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces the Generative Likelihood Ratio Attack (Gen-LRA), a novel membership inference attack specifically aimed at detecting privacy leakage due to overfitting in generative models. Unlike prior methods, Gen-LRA employs a likelihood ratio-based hypothesis testing approach to infer membership without requiring extensive knowledge of the model structure or parameters. 
By leveraging density estimation techniques, the authors assess whether synthetic data generated by a model is overfitting to specific training data points, particularly in regions with outliers. The authors demonstrate that Gen-LRA significantly outperforms existing MIA methods across various generative architectures and datasets, with particular success in scenarios with low false positive rates, highlighting the nuanced privacy risks associated with generative models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The effectiveness of Gen-LRA depends heavily on accurate density estimation, which can be challenging in high-dimensional data settings. The use of kernel density estimation (KDE) or principal component analysis (PCA) for dimensionality reduction may limit applicability and accuracy. This limitation is critical because the success of the Gen-LRA method hinges on reliable density estimation, which becomes less accurate in high-dimensional spaces without significant computational expense. Inaccuracies here can undermine the method's robustness, making this the most pressing limitation.\n2. Although Gen-LRA performs well at low false positive rates, its reliance on outlier detection may lead to elevated false positives in datasets with inherently high variability or complex distributions. False positives can impair the practical applicability of Gen-LRA in privacy-sensitive contexts, as overly cautious results may lead to unnecessary restrictions on data release. \n3. Gen-LRA presumes that privacy leakage primarily stems from overfitting, potentially overlooking other forms of leakage that may not manifest as local overfitting. This could lead to incomplete privacy assessments, as the Gen-LRA approach might miss privacy vulnerabilities that do not align with the overfitting model. Expanding Gen-LRA’s scope to address other leakage types could enhance its overall utility." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "The paper focuses on membership inference attacks, which could be leveraged by adversaries to launch privacy attacks." }, "flag_for_ethics_review": { "value": [ "Yes, Privacy, security and safety", "Yes, Potentially harmful insights, methodologies and applications" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "First, I would like to point out that I am not fully up-to-date on the literature regarding membership inference attacks, especially those involving tabular data. As a result, I may be unable to assess the novelty of this work and might not be familiar with the common settings examined in recent literature.\n\n1. The paper assumes the reference data is available to the attacker. This does not seem to be very realistic to me. Section 1 discussed that a common scenario for synthetic data release is that the data owner wants to release data for open research. This implies that such data is not available to the public before that (if such data is already available, then there is no motivation or value for the data owner to release an additional dataset). That means that the attacker does not have access to the reference data either. 
The prior work I knew often considered attacks that do not make such assumptions (e.g., https://arxiv.org/pdf/1705.07663 and https://arxiv.org/pdf/1909.03935).\n\n The paper claims that this setting is realistic in Section 2: \"We assume this in practice because this represents a plausible scenario for the owner of S as an attacker may be able to find comparable data in the real world...\" Unfortuantely, I do not fully understand this example. It would be great if the author can explain it in more detail in the rebuttal.\n\n2. Continuing on the above point, the paper needs to make it clearer what assumptions each of the baseline methods in Section 5 make. Which of them also makes the assumption that reference data is available to the attacker? This would clarify whether the claimed improvement comes from the relaxation of the assumptions or the fundamental advances of the algorithm itself.\n\n3. The paper only evaluates the proposed algorithm on tabular data. But this is not reflected in the title and abstract. By reading only the title and the abstract, the readers might be misled to think that the paper proposes and evaluates the attack on diverse data types.\n\n I think it is important to clarify that, as the proposed approach relies on kernel density estimation, which (as discussed in the paper) does not scale well with the data dimension. (The proposed approach relies on dimension-reduction techniques to tackle the issue.) Therefore, it is unclear if such a pipeline can work well on other more high-dimensional and complicated data such as images and text. \n\n4. How do you determine the kernel size and the type of the kernel in the experiments? Is the algorithm sensitive to that?\n\n5. 
Section 5 mentioned that \"For Gen-LRA, we found that the choice of k can have a small impact on the performance of the attack (See Appendix A.3), we therefore use the results of the best k choice for each run as the goal for an MIA is to characterize the maximal empirical privacy risk.\" I understand that choosing the best k could help \"characterize the maximal empirical privacy risk\". However, this table is mainly for comparing between different baselines. The comparison would be unfair if you chose the best hyper-parameter for your own approach while not doing that for the baseline methods.\n\n7. The discussion in Section 6.2 is nice, but it would be more self-contained if the paper could describe how DCR works in the main text.\n\n\nOther minor questions:\n\n1. Section 1: \"We demonstrate that Gen-LRA identifies a different source of privacy leakage relative to other commonly used MIAs.\" It would be better to clarify what \"the different source\" means here. I could only understand it after reading Section 5.\n\n2. Line 116 and 117: what are M and D? These notations do not seem consistent with what was used before.\n\n3. Line 127: typo on the left quotation mark\n\n4. Line 266: missing a )" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The proposed method is simple and effective.\n\n* In general, the writing of the paper is clear.\n\n* The paper has demonstrated results on many datasets and models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a new approach to do membership inference attacks for tabular data generative models. 
The approach first estimates the distributions of (1) the reference samples plus the target sample and (2) the reference samples with kernel density estimation, and then computes the density ratio of synthetic samples over these two distributions. The intuition is that, if the target sample were used in training, the density of synthetic samples on distribution (1) would be higher. Results across various datasets and models show that the proposed approach yields better AUC-ROC and TPR at low FPRs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The assumption that the reference data is available to the attacker is too strong.\n\n* The title and the abstract do not reflect the scope and constraint of the method sufficiently." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Can you expand the related work to also include the shadow-modeling based MIAs? \n\n- To truly understand the contribution, could you implement the shadow-modeling based MIAs [1,2,3] as well and report their results? Right now, the Gen-LRA method seems to be better than the prior work you consider, and does so with limited assumptions for the attacker and with limited computational cost. How does this change when the attacker now (i) has knowledge of the training algorithm and (ii) has the computational resources to train shadow models? 
Could authors implement these shadow-model MIAs and report the results alongside Gen-LRA? This would help to position the method and its results in the literature, giving a clear understanding of the impact of certain assumptions and computational cost on the MIA results. \n\n- Similarly, the work on shadow modeling MIAs also discusses disparate vulnerability of outliers [1,2,3]. Stadler et al [1] finds outliers to be more vulnerable than randomly selected records, while Meeus et al [3] proposes a method to identify more vulnerable records. Could authors have more elaborate results for the outlier discussion (e.g. show MIA results for outliers vs random points across datasets) and relate these findings to prior work? While the fact that Gen-LRA focuses on outliers is distinct from distance-based methods, these findings might not be very different than the ones in shadow-modeling based MIAs." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Technically novel, and interesting, way to compute the membership inference inference signal from synthetic data. The method is theoretically grounded, computationally efficient and relies on limited assumptions for the attacker. \n- They show the method to outperform a range of MIAs from the literature\n- Comprehensive evaluation of the attack across 15 datasets\n- Authors include intuitive examples (eg Fig 1 and Sec 6.2) that are well explained and help the understanding of the paper." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Gen-LRA, a novel membership inference attack (MIA) methodology for evaluating privacy risks in synthetic tabular data. 
The authors propose a hypothesis testing framework that computes a likelihood ratio specifically targeted at identifying any local overfitting of the target record. The method requires minimal assumptions, just access to the released synthetic dataset and a reference dataset. They find their method to outperform baselines from the literature across 15 datasets. They further find their method to be particularly successful against outliers, in contrast with other MIAs from the literature." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(More details see questions)\n\n- My main concern comes down to a lack of related work being discussed. A range of important works have studied MIAs against synthetic tabular data using shadow modeling [1,2,3]. While I understand that these works are computationally more expensive and additionally rely on the attacker's knowledge of the training algorithm, I find these works to be very relevant to position this paper and its findings. \n- Limited secondary insights with experimental depth. For instance, to make the claim that the method works better for outliers (especially compared to other methods), section 5.3 is mostly anecdotal. \n\n[1] Stadler, T., Oprisanu, B., & Troncoso, C. (2022). Synthetic data–anonymisation groundhog day. In 31st USENIX Security Symposium (USENIX Security 22) (pp. 1451-1468).\n\n[2] Houssiau, F., Jordon, J., Cohen, S. N., Daniel, O., Elliott, A., Geddes, J., ... & Szpruch, L. TAPAS: a Toolbox for Adversarial Privacy Auditing of Synthetic Data. In NeurIPS 2022 Workshop on Synthetic Data for Empowering ML Research.\n\n[3] Meeus, M., Guepin, F., Creţu, A. M., & de Montjoye, Y. A. (2023, September). Achilles’ heels: vulnerable record identification in synthetic data publishing. In European Symposium on Research in Computer Security (pp. 380-399). Cham: Springer Nature Switzerland." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "No further questions." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The likelihood ratio that Gen-LRA estimates is novel to my knowledge, and seems to be closer to the likelihood ratio that would be theoretically optimal than what previous work has looked at. The paper is easy to understand, and the writing is generally polished.\n\nLooking at TPR @ low FPR is good practice, and too often neglected in the MIA literature. The paper could even highlight these results further: most of the AUC-ROC scores for all methods are close to random guessing, but Gen-LRA is much more accurate than random guessing at FPR = 0.001." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a novel membership inference attack on synthetic data generators called Gen-LRA, based on estimating a likelihood ratio between the synthetic data coming from a reference distribution vs. it coming from the reference distribution with a target point included. Gen-LRA is benchmarked againt several competing attacks on a variety of datasets, where Gen-LRA generally outperforms the competition." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Using the PCA+KDE density estimator for DOMIAS is not fully fair, since the DOMIAS paper used a more sophisticated density estimator which was found to perform better than the KDE. Of course, the same estimator could also improve the results of Gen-LRA, and PCA+KDE could be computationally cheaper, but these should be checked empirically.\n\nUsing PCA may limit the applicability of outlier overfitting detection for outliers with rare categorical values. For example, consider the detection of overfitting on datapoints of French people on the Adult dataset. PCA weights the input dimensions based on how much variance they have, so the indicator for being French would have a very low weight (<1% of the data is French). As a result, the PCA outputs would be very similar between French and non-French people, and Gen-LRA would not be able to detect overfitting affecting French people. Unless I'm completely mistaken about this phenomenon, this should be mentioned as a limitation.\n\nFor a similar reason, you should check if datapoints with high DCR score have similarities. It could be that they do, but UMAP is not considering these important. This could change the interpretation of Figure 2 that DCR does not target specific outlier regions. \n\nYou should also discuss the fact that Ward et al. (2024) report a very similar finding to your Figure 2 with their MIA. As a part of this, it would be interesting to see analogues of Figure 2 for the other MIAs used as baselines.\n\nPlease include separate results from each dataset in addition to the mean results across datasets. The datasets could have significant performance differences that the aggregation hides. 
I'm also not sure if the standard deviations of performance across different datasets are meaningful in any way.\n\nMinor points:\n- The paper should make the differences between DOMIAS and Gen-LRA clearer, since the methods are fairly similar.\n- It not clear what $\\mathbb{P}\\cup \\{x^*\\}$ precisely is, which makes the motivation leading to Equation 4 seem a bit handwavy.\n- Contribution 1: this sentence is a bit unclear, making it seem like the null and alternative hypotheses are the same.\n- Line 172: capitalise \"equation 4\".\n- Line 266: missing parenthesis.\n- Line 346: \"scale\" is ambiguous, I would suggest \"normalise\" if that is what you are doing.\n- Several references are missing the publication forum, for example Durkan et al. (2019), Ganev and De Cristofaro (2023)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024genlra,\ntitle={Gen-{LRA}: Towards a Principled Membership Inference Attack for Generative Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=02DCEU6vSU},\nnote={under review}\n}" }, "abstract": { "value": "Evaluating the potential privacy leakage of synthetic data is an important but unresolved problem. Most existing adversarial auditing frameworks for synthetic data rely on heuristics and unreasonable assumptions to attack the failure modes of generative models, exhibiting limited capability to describe and detect the privacy exposure of training data. In this paper, we study designing Membership Inference Attacks (MIAs) that specifically exploit the observation that generative models tend to memorize certain data points in their training sets, leading to significant local overfitting. 
Here, we propose Generative Likelihood Ratio Attack (Gen-LRA), a novel, computationally efficient shadow-box MIA that, with no assumption of model knowledge or access, attacks the generated synthetic dataset by conducting a hypothesis test that it is locally overfit to potential training data. Assessed over a comprehensive benchmark spanning diverse datasets, model architectures, and attack parameters, we find that Gen-LRA consistently dominates other MIAs for generative models across multiple performance metrics. These results underscore Gen-LRA's effectiveness as an interpretable and robust privacy auditing tool, highlighting the significant privacy risks posed by generative model overfitting in real-world applications" }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Privacy", "Membership Inference Attacks", "Generative Models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/bcad18f87958725e9b50970906e168913dcdf521.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. 
If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/50c96fb68049a4bec3f129b7c7f85b812793218e.pdf" }, "title": { "value": "Gen-LRA: Towards a Principled Membership Inference Attack for Generative Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
02Od16GFRW
Ensembles provably learn equivariance through data augmentation
main
Active
equivariance;invariance;ensemble models;data augmentation;SGD
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;6;6
4;3;3
3;3;3
2;3;2
3;3;2
5
3.333333
3
2.333333
2.666667
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The work show the emergence of equivariant in ensemble models\n- The work generalizes previous works where the proof relied on NTKs\n- Experiments with large ensemble of models show the emergence of equivariance" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper shows that an ensemble of models when trained with data augmentation leads to emergence of equivariance properties naturally. The results generalize over past known results based on NTKs. The theory assumes some basic assumptions on the architecture and shows that, when the initialization of the weights in an architecture has some symmetry, then, the expected architecture of the ensemble is equivariant. Experimental results with various ensembles validates the results for the C4 group of symmetries." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I have several concerns over the usefulness of the theory and the experimental results.\n\nUsefulness of theory:\n- What is the use of the theory in model design or practical use cases? 
Since equivariant models seems to give perfect equivariance and data augmentation techniques give approximate equivariance. So, I am wondering what is the use of ensemble technique for symmetries, especially, given that we need over 1000 models to get good equivariant results.\n- What are the advantages of the proposed technique compared to existing symmetrization and canonicalization methods [1-4] that can convert non-equivariant models into equivariant ones using techniques somewhat similar to ensemble methods but with additional transformations that looks similar to augmentation.\n\nExperimental Results:\n- Although the experimental does show that the architecture with symmetric support does give invariant output, but even the asymmetric architecture seems to be giving invariant output, questioning the usefulness of the theory. It is also discussed in the paper about the symmetric states being attractors potentially, but, it still makes the current theory not very useful.\n- Experiments are only shown for C4 symmetries\n\n[1] Basu, Sourya, et al. \"Equi-tuning: Group equivariant fine-tuning of pretrained models.\" Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 37. No. 6. 2023.\n\n[2] Mondal, Arnab Kumar, et al. \"Equivariant adaptation of large pretrained models.\" Advances in Neural Information Processing Systems 36 (2023): 50293-50309.\n\n[3] Basu, Sourya, et al. \"Efficient equivariant transfer learning from pretrained models.\" Advances in Neural Information Processing Systems 36 (2024).\n\n[4] Kaba, Sékou-Oumar, et al. \"Equivariance with learned canonicalization functions.\" International Conference on Machine Learning. PMLR, 2023." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "The results in Table 1 aren't that clear to me. In the asymmetric case where you have a symmetric initialization, shouldn't you get results that are similar to the symmetric case? Yet there is a large gap" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- It generalizes the results in Gerken & Kessel \n- The topic of invariance/equivariance is important so these results would be of interest to people in that community" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper expands the results of Gerken & Kessel that show that data augmentation produces equivariant ensembles of models using NTK, by looking at finite network sizes. They then show empirically that their theoretical results indeed hold in practice (up to sampling errors)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main issue is with the writing: \n- The results presented in the main text are quite trivial, that if you start with an invariant distribution and use an invariant flow you end up with an invariant distribution. 
The more interesting results are in the appendix (appendix B and C)\n- You writing $\\mathcal{L} = A_\\mathcal{L} + T\\mathcal{L}$ with $T\\mathcal{L}$ the tangent space is very confusing, as tangent space is defined for a manifold and we are talking about a linear space. It needlessly complicates things as there is no need to involve differential geometry when we are working on linear spaces." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Why does the OSP not increase at initialization when ensemble size increases?\n1. From the figures, it seems like the results could improve with more epochs (also for baselines). Could you please provide results with a larger number of epochs?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well-structured and easy to follow.\n1. The paper extends previous results to more reasonable and applicable settings. This is a significant extension." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a theoretical analysis showing that data augmentation can lead to equivariance in deep ensembles. The paper's main result is that under several assumptions (e.g. 
on initialization, architecture, etc.), deep ensembles trained with data augmentation are equivariant in mean, even when individual models are generally not. A similar result was previously presented, but the paper extends these previous results, which were primarily focused on infinitely wide NNs trained with gradient descent under full augmentation, to ensembles of finite-width trained with SGD and random augmentation.\nThe paper is mainly theoretical and validates the theoretical results through limited and small-scale empirical experiments." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I like the paper and believe it has a sufficient contribution and interesting results. However, there are several limitations stated below:\n\n1. While the assumptions for the theoretical analysis are more applicable compared to previous works, they still hold only for infinite-size ensembles. Any analysis (including empirical) on the error bounds for finite ensembles would be beneficial.\n1. While the results are important, the novelty is somewhat moderate in the sense that the emergent equivariance property of ensembles was previously proposed and the fact that the theoretical analysis heavily relies on previous works [1].\n1. From the empirical evidence, it is unclear if some of the assumptions (like symmetric initialization) are indeed necessary. The authors discuss this, but I believe it can be extended further.\n1. Empirical evaluation is limited. It would be beneficial to extend it to more settings, even by small modifications like considering cyclic groups C_k of different orders (k), different architectures, model sizes, etc.\n1. It would be beneficial to see the impact of ensemble size on the metrics in Table 1, like adding a line plot for ensemble size vs. OSP. The authors show results for different sizes, but summarizing them in one clear view would make it easier to follow.\n1. 
The paper could benefit from a clearer and more explicit discussion of the limitations of the results.\n1. Minor:\n - Line 37: “... a definitive question to the question…”.\n\nReference\n\n[1] Flinth & Ohlsson, Optimization Dynamics of Equivariant and Augmented Neural Networks, 2023." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We prove that ensemble models learn equivariance through data augmentation." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024ensembles,\ntitle={Ensembles provably learn equivariance through data augmentation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=02Od16GFRW},\nnote={under review}\n}" }, "abstract": { "value": "Recently, it was proved that group equivariance emerges in ensembles of neural networks as the result of full augmentation in the limit of infinitely wide neural networks (neural tangent kernel limit). In this paper, we extend this result significantly. We provide a proof that this emergence does not depend on the neural tangent kernel limit at all. We also consider stochastic settings, and furthermore general architectures. For the latter, we provide a simple sufficient condition on the relation between the architecture and the action of the group for our results to hold. We validate our findings through simple numeric experiments." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "equivariance", "invariance", "ensemble models", "data augmentation", "SGD" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/d6a8bd193bcc928733dcbba2b6319d8fcb54d671.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/b8e3c4c2c81cde74095c52c2c359a5d2af6cf52f.zip" }, "title": { "value": "Ensembles provably learn equivariance through data augmentation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
02haSpO453
VILA-U: a Unified Foundation Model Integrating Visual Understanding and Generation
main
Active
Unified Visual Language Model;Autoregressive Model
foundation or frontier models, including LLMs
3;5;5;6
4;5;3;4
3;2;2;4
2;2;3;3
3;2;3;3
4.75
4
2.75
2.5
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Please share missing details as mentioned in the weaknesses\n- What are the number of image and video tokens going into the LLM? How many tokens are processed by the RQ-transformer and what is its size (the RQ-VAE paper has multiple different settings)?\n- It would be interesting to see if the vision tower training results hold for a general VAE setup instead of an RQ-VAE since that would make the results even more broadly applicable" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- The paper's most interesting contribution is the unified vision tower exploration to unify generation and understanding and the appropriate ways to train such an encoder\n- The approach is quite straightforward and the application of RQ-VAE allows for token efficiency while preserving more information\n- VILA-U is close to SOTA on visual understanding tasks (image and video) with comparable models\n- The model also fares well on image generation tasks and comes close to diffusion models" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "- The paper presents VILA-U, a unified model for language, image and video understanding + generation\n- The model is trained with an 
autoregressive next token prediction loss for all tasks\n- The paper explores vision encoder choices to ensure understanding and generation performance" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The method chooses RQ-VAE for efficiency, but there isn't a discussion / results around this. How would the results look if the vision tower didn't use RQ-VAE? How important is the RQ-VAE?\n- The generated images are relatively low-resolution (256 or 384px), especially since the RQ-VAE allows for increased efficiency in tokens\n- The paper doesn't really discuss video implementation details. Video understanding and generation have a mismatch in FPS / durations they usually support, what does VILA-U support? There isn't a discussion around this.\n- The paper claims to support video generation, but there are no quantitative results around this. The two qualitative examples are also very simplistic in Figure 7." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses section." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. 
VILA-U introduces a unified framework that handles both visual understanding and generation in a single autoregressive next-token prediction model. \n\n2. The model leverages a unified vision tower that uses contrastive learning to align discrete visual tokens with textual inputs, which enhances the model's visual perception and text-visual alignment capabilities.\n\n3. The experiments indicate the state-of-the-art performance of VILA-U in both image generation and understanding." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents VILA-U, a unified foundation model for visual understanding and generation that integrates image and language processing into a single autoregressive next-token prediction framework. Unlike traditional visual language models that rely on separate modules or diffusion models for generation, VILA-U employs a unified vision tower to discretize visual inputs, aligning them with textual tokens through contrastive learning. From the experiments, the authors show that VILA-U can achieve state-of-the-art performance in both image generation and comprehension." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Missing the clarification between VILA-U and other tokenization-based multimodal models, like AnyGPT [1] and SEED-LLaMa [2]. Those models also used visual tokenizers to discrete the images and trained with causal language modeling loss. I noticed the authors cite the SEED-LLaMa in the line 102, but the claim of “In this work, we design our framework based on the autoregressive next-token prediction method for visual generation and make our VLM learn to generate visual content effectively.” does not the main difference between VILA-U and SEED-LLaMa.\n\n2. One of the claimed contributions of this paper is about proposing the training strategy for the unified foundation vision tower. 
However, the training strategy seems similar to SEED [3], which also used contrastive loss between image embeddings and text embeddings. Can authors clarify the difference between the unified foundation vision tower and SEED?\n\n3. Comparisons with other tokenization-based multimodal models [1,2] and Emu2 [4] are missing.\n\n4. The limitation section, which is required, is missing.\n\n[1] Zhan, Jun, et al. \"Anygpt: Unified multimodal llm with discrete sequence modeling.\" arXiv preprint arXiv:2402.12226 (2024).\n\n[2] Ge, Yuying, et al. \"Making llama see and draw with seed tokenizer.\" arXiv preprint arXiv:2310.01218 (2023).\n\n[3] Ge, Yuying, et al. \"Planting a seed of vision in large language model.\" arXiv preprint arXiv:2307.08041 (2023).\n\n[4] Sun, Quan, et al. \"Generative multimodal models are in-context learners.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "My biggest suggestion/question is related to the number 1 weakness described above. If the author could highlight the main contribution of the work that would make its positioning much easier. One positioning that was left out in the weakness section above is to position the work as the \"first\" in some regards. 
However, while autoregressive modeling of text + language is a burgeoning field, VILA-U is not the first model that performs autoregressive modeling of multiple modalities." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The unification of multiple modalities in the same architecture (with the same training objective) is a very important topic. The paper is a valuable contribution to this overall research program. In the current work, the choice of quantized image tokens for image representation makes the autoregressive modeling task more natural as the image modality is tokenized into discrete tokens much like language. This helps minimizes the amount of code development required for adapting existing LLM code bases to their multimodal counterparts.\n2. The paper performed fairly complete evaluations (image-text, video-text, text-image, ) and ablation studies that include model backbone and training objective." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper, VILA-U presents a unified framework of autoregressive multimodal generation and understanding. It achieves this by first training a vision encoder (discretized via RQ codebook) for text-conditioned image tokens (initialized from CLIP) and then training image+text data using autoregressive modeling. It presents a complete training recipe for creating autoregressive multimodal models, and the resulting model is benchmarked against a wide range of existing models across tasks (generation and understanding)" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. It is not clear to me how to position the work in its novelty or effectiveness and this may be addressable with some rewriting. I see 3 potential angles\n 1. 
Training effectiveness by leveraging pretrained networks. The authors motivates the work by emphasizing that existing methods that attempt to unify multimodal generation and understanding either require significant architectural modifications to their uni-modal counterparts, or training from scratch. However, this comparison seems not to play a central role in the subsequent discussions. If the effectiveness of the proposed method is reflected in ease of training, then readers would expect to see comparison of training time/compute for comparable performances. \n 2. Effective token representation of image modality as discrete tokens: VILA-U differs from prior work in its adoption of RQ-VAE embedding for images. However, if this is the main innovation, the choice of RQ, its superiority over alternative methods, the important of discontinuous embedding of images (as compared to, for example, continuous embedding as in LaViT) will need to be elevated.\n 3. State-of-the-art performance: If the main contribution is instead just the shear effectiveness of the method. Then it should demonstrate this quantitative in the paper. Unfortunately, the comparison tables doesn’t seem to suggest that the VILA-U model is the state-of-the-art in most benchmarks. Perhaps it achieves Pareto frontier between understanding and generation tasks? Or outperforms other models for the same training compute/time? Either way I’m not clear what the main advantage of the current work is over others. \n2. The discussion around training recipe is very important and useful for practitioners. However, it lacks both quantitative and qualitative (with examples) comparisons of the different training recipes. With the conclusion seems to be use an aligned CLIP model for image encoder initialization, which doesn’t seem to be a novel finding. I would recommend either supporting the discussion with more evaluation (quantitive or qualitative, ideally both) or moving the discussion to the appendix.\n3. 
The paper suffers from unsubstantiated claims ( neither references nor experimental support). I've highlighted a few statements that are very important for the message in the paper below:\n - \"replacing continuous tokens with VQ tokens in VLMs usually results in a severe performance drop\"\n - \"A straightforward combination of contrastive and reconstruction loss cannot converge\"\n - \"both the rFID and Top-1 accuracy of the vision tower only serves as a medium indicator instead of directly linearly correlated to the final performance of our whole multi-modal framework.\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "All datasets used are public, no ethics review needed." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The solid experimental results of VILA-U have largely reignited my confidence in the autoregressive image-text unified modeling direction. However, why is there no comparison with other text-image unified modeling models such as \\textbf{MM-Interleaved, SEED, and DEEM} on image understanding tasks? Ignoring the contributions of pioneers is not advisable.\n\n2. The video generation experiments are insufficient. Why not compare with methods like \\textbf{OpenSora} and \\textbf{CogVideoX} on \\textbf{VBench}?\n\n3. The article is unclear in its expression; are the visual tokens features directly discretized by the visual encoder, or are they encoded by a large language model? 
I suspect it is the former.\n\n4. VILA-U claims to have lower computational complexity and to avoid misalignment. While I recognize the importance of addressing misalignment, the claim of lower complexity requires experimental support. Specifically, compared to unified autoregressive image-text modeling models, using separate models like fine-tuning Stable Diffusion can also construct end-to-end autoregressive image-text modeling, which is more efficient in training and performs better. Moreover, utilizing existing mature acceleration schemes offers fast speeds. VILA-U should emphasize more on data cleansing quality and misalignment.\n\n5. Lastly, and most critically, I hypothesize that the structural improvements of the model provide minimal benefits compared to previous autoregressive unified models, with the majority of improvements stemming from the engineered data cleansing. For instance, MMC4-Core contains 22.4M data while MMC4 has 375M, yet some research indicates that training with these two datasets yields similar outcomes. Large-scale datasets like MMC4 are of very low quality. However, using just 6M of data to achieve excellent results suggests that your data is meticulously filtered, yet the paper lacks any detail on the core contributions of data construction. Conducting experiments on the same data with other model structures like \\textbf{DreamLLM} is necessary to demonstrate the efficiency of \\textbf{VILA-U}. \n\nI will improve my rating score if my concerns are addressed." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The idea of VILA-U is very straightforward, and the experiments are solid. 
It significantly enhances the capabilities of end-to-end autoregressive multimodal models in visual-language tasks, bridging the gap between autoregressive multimodal models and the LLAVA series, while also excelling in image generation.\n\n2. The structure of the VILA-U paper is simple and easy to read, and the model implementation is very easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Summary:\n\nVILA-U is a foundation model that unifies video, image, and language understanding and generation. Unlike traditional models that use separate components for different tasks, VILA-U simplifies this by employing a single autoregressive framework. This reduces misalignment and maintains near state-of-the-art performance in both understanding and generating visual language content. Key factors for its success include a unified vision tower that aligns visual and textual inputs, enhancing perception, and the ability to achieve high-quality image generation similar to diffusion models.\n\nContributions:\n\n1. VILA-U strives for an end-to-end autoregressive model that handles both visual and textual inputs through a unified next-token prediction approach. This approach eliminates the need for external components like diffusion models, simplifying the infrastructure.\n2. VILA-U is tested across a range of tasks, including image-language and video-language understanding, as well as image and video generation. It demonstrates notable improvements, particularly in narrowing the gap between autoregressive and continuous-token models in visual understanding, while also offering robust visual generation capabilities." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.Regarding the issue of missing in context learning assessments, VILA-U has undergone extensive training on image-text sequences and can accept any interleaved layouts of images and text. 
Therefore, it should possess excellent contextual learning abilities. This work could be enhanced by conducting tests on its ICT capabilities.\n\n2.The description of the data curation process is not sufficiently clear, making it uncertain whether the data was meticulously selected or randomly chosen. If it is the former, I suspect that most of the improvements stem from high-quality data engineering rather than advancements in model architecture." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "VILA-U is a Unified foundation model that integrates Video, Image, Language understanding and generation. It employs a single autoregressive next-token prediction framework for both tasks." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024vilau,\ntitle={{VILA}-U: a Unified Foundation Model Integrating Visual Understanding and Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=02haSpO453},\nnote={under review}\n}" }, "abstract": { "value": "VILA-U is a Unified foundation model that integrates Video, Image, Language understanding and generation. Traditional visual language models (VLMs) use separate modules for understanding and generating visual content, which can lead to misalignment and increased complexity. In contrast, VILA-U employs a single autoregressive next-token prediction framework for both tasks, eliminating the need for additional components like diffusion models. This approach not only simplifies the model but also achieves near state-of-the-art performance in visual language understanding and generation. The success of VILA-U is attributed to two main factors: the unified vision tower that aligns discrete visual tokens with textual inputs during pretraining, which enhances visual perception, and autoregressive image generation can achieve similar quality as diffusion models with high-quality dataset. 
This allows VILA-U to perform comparably to more complex models using a fully token-based autoregressive framework." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Unified Visual Language Model", "Autoregressive Model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/50e98a8144a1cddb2de5c13e4af3f3a5a157d4f3.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "VILA-U: a Unified Foundation Model Integrating Visual Understanding and Generation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
02kZwCo0C3
SAIL: Self-improving Efficient Online Alignment of Large Language Models
main
Active
RLHF;Alignment;Online Alignment;Self-Play
alignment, fairness, safety, privacy, and societal considerations
3;6;6;8
4;3;4;4
3;3;3;4
3;3;4;3
2;4;2;4
5.75
3.75
3.25
3.25
3
-0.080845
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Reward margin and offline-reward evaluation is interesting by itself and could provide information of the effectiveness of the method, but I personally think is not as an important measurement as pairwise winrate. Could you elaborate on Section 6.1 why one should consider looking into it?\n\n* Please check the questions in weaknesses as well!" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The authors test of two LLM-as-a-Judge benchmarks as well as on a well-established classification benchmark, and their results are consistent.\n* The authors provide a theoretical explanation of why their method works effectively.\n* Showing all possible combinations at Figure 2 helped understanding what kind of online RLHF methods one should consider\n* The results are consistent across smaller models (0.5B) up to widely used scale models (8B)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Compared to offline RLHF methods, online RLHF methods empirically show stronger performance, yet is computationally expensive, vulnerable to distribution shifts and lacks a unified framework. 
The authors ablate different online RLHF methods based on all possible combinations (namely, SAIL-PR, SAIL-PP, SAIL-DP) which could be useful for future work exploring online RLHF methods. Personally, it was surprising that SAIL-PP generally works on par or slightly better than SAIL-PR, which opens up further research questions on what would be the optimal way to obtain a preference dataset." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* As a practitioner, at least the presentation/writing wasn't clear enough to agree that SAIL provides a unified framework for those who might want to consider using online RLHF in future works. I would personally suggest adding a section that explains how one could use SAIL instead of iterative DPO methods, as well as a huge emphasis on how the provided code could be used.\n* There is a huge emphasis on trying to improve reward models (on RewardBench) to mitigate reward model overoptimization & train better LMs. I am curious if given a fixed budget/time limit, whether one should try to employ online RLHF methods or try to enhance reward models in general.\n* I would suggest adding an explanation of what is the limitation of online RLHF methods that the paper could not address. For example, it is still unclear on what is the best practice to \"whether to discard instances from a preference dataset that have a subtle difference on the preference strength\" or \"would it be beneficial to employ more models when gathering responses when constructing a preference dataset\"."
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "There is a large amount of blank space below Section 6.1. Is there any missing content in this part of the paper?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Introducing Bi-level Preference Optimization: The process of bi-level preference optimization is integrated into the modeling of online RLHF. By leveraging the unique correspondence between the reward function and the LLM policy, this approach innovatively transforms the process into an equivalent single-layer form that is easier to solve.\n\n2. Extensive Experiments on SAIL: Comprehensive and rich experiments were conducted to address the three significant challenges in online RLHF and to demonstrate the relevant applications of SAIL." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors identify three significant challenges in online RLHF algorithms: Challenge 1: the interdependence between models and data in implicit reward learning; Challenge 2: the computational complexity of bi-level optimization; and Challenge 3: the reliance on preference oracles. They propose SAIL to address these challenges. 
\n\nThe main contributions of the paper can be summarized as follows:\n\n1. **Unified LLM Alignment Mathematical Framework**: The authors have designed a principled online RLHF framework that provides concrete guidance for generating new responses, assuming the existence of a preference oracle.\n\n2. **Adaptive Direct Preference Optimization**: By introducing a DPO-style analysis, the authors present an efficient single-layer solution capable of effectively addressing distribution shifts and providing a scalable online preference optimization method.\n\n3. **Introduction of a Self-Improvement Mechanism**: This mechanism reduces the reliance on preference oracles.\n\n4. **Extensive Experimental Evaluation**: The experiments conducted demonstrate that SAIL significantly outperforms baseline methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Regarding the three variants of the SAIL method, Table 3 shows that in the Eval-Reward and MT-bench columns, the SAIL method performs worse than the baseline DPO. Please clarify whether these experimental results undermine the assertion that the SAIL method is superior to the baseline DPO." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. The paper demonstrates SAIL's efficiency with models up to 8B parameters. 
Could you share any considerations or expected challenges for scaling SAIL to significantly larger models, such as those with over 100B parameters?\n\n2. SAIL currently relies on the Bradley-Terry preference model. Have you considered experimenting with other preference models, and do you anticipate any impact on alignment performance if different utility functions are used?\n\n3. SAIL-DP seems to show some overfitting on in-distribution responses. Could you discuss any regularization techniques you considered or plans to mitigate this, particularly to enhance generalization to out-of-distribution data?\n\n4. Given the dependence on an initial offline dataset, how does SAIL perform in situations with minimal or noisy initial data? Are there strategies within the current framework to mitigate issues arising from a limited initial dataset?\n\n5. Could you provide more detail on the computational costs of SAIL, particularly in comparison with other RLHF approaches? How does the single-level optimization approach compare in terms of resource requirements, and what practical considerations should be kept in mind when implementing it?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. **Innovative Formulation**: The paper provides a novel formulation of online RLHF through bilevel optimization, enhancing computational efficiency by reducing this problem to a single-level optimization, which is a significant advancement for practical LLM training.\n2. **Effective Self-improvement Mechanism**: SAIL effectively addresses challenges related to reliance on preference oracles, making online alignment more feasible by leveraging the model's self-generated responses for iterative improvement.\n3. 
**Comprehensive Evaluation**: The paper includes extensive experiments that demonstrate substantial improvements in evaluation reward, win rate, and efficiency over other methods like DPO, supporting SAIL's efficacy and computational advantage.\n4. **Scalability and Adaptability**: SAIL’s approach to handling distribution shifts and reducing oracle reliance presents a promising method for more scalable RLHF applications, especially for emerging large-scale LLMs.\n5. **Detailed Experiment Design and Baselines**: The experiment section is well-structured, covering a range of metrics (reward-margin, eval-reward, win rate) and configurations (SAIL-PR, SAIL-PP, SAIL-DP), providing insights into the trade-offs and performance across different setups." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces SAIL (Self-improving Efficient Online Alignment), an approach for online reinforcement learning from human feedback (RLHF) that aims to align large language models (LLMs) with human preferences. SAIL addresses limitations in offline RLHF methods by framing online LLM alignment as a bilevel optimization problem, which it reduces to a single-level first-order optimization method to enhance computational efficiency. The approach allows for continuous model improvement by generating samples iteratively, regulating preferences, and exploring online feedback. SAIL's self-improvement mechanism enables it to reduce reliance on preference oracles, thus allowing for more scalable alignment. Empirical evaluations demonstrate significant performance improvements over standard RLHF baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Limited Exploration of Alternative Utility Functions**: The method relies on the Bradley-Terry preference model, which may not be optimal for all RLHF applications. 
Future work could benefit from exploring alternative utility models that account for more nuanced preference data.\n2. **Scalability Concerns for Larger Models**: Although the paper demonstrates SAIL’s effectiveness on LLMs with up to 8B parameters, additional scaling experiments would strengthen the paper's claims about computational efficiency for significantly larger models.\n3. **Dependency on Initial Offline Dataset**: While SAIL reduces oracle dependency, it still relies on an initial offline dataset to bootstrap alignment. Further discussion on managing this dependency, especially when starting with limited labeled data, could be beneficial.\n4. **Potential Overfitting in SAIL-DP**: The paper mentions that SAIL-DP shows signs of overfitting on in-distribution responses, suggesting that the method may benefit from more refined regularization techniques to ensure robust generalization to out-of-distribution samples." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See the weakness section." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "(1) The paper introduces a novel unified framework for online RLHF that effectively addresses the challenges of static datasets and distribution shifts.\n(2) By reducing a bilevel optimization problem to a single-level method, SAIL maintains theoretical benefits while significantly lowering computational costs, making it more practical for real-world applications.\n(3) The self-improving aspect of SAIL allows models to iteratively enhance alignment without extensive supervision, addressing the challenge of needing constant access to human preference data.\n(4) Extensive experiments validate the effectiveness of SAIL, showing substantial improvements in performance metrics compared to existing methods, thus showcasing its applicability across various datasets.\n\nI would consider rescoring if the authors can solve my concern." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the limitations of traditional reinforcement learning from human feedback (RLHF) methods for aligning large language models (LLMs) with human preferences. The authors propose a unified framework for online RLHF formulated as a bilevel optimization problem, which they simplify to a single-level method for efficiency. This approach, called SAIL, allows for continuous model improvement through online exploration and iterative refinement of preference labels, mitigating issues related to distribution shifts and reducing reliance on static preference oracles. Experimental results demonstrate significant performance gains, with SAIL outperforming state-of-the-art RLHF methods." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) The method does not improve much in the AlpacaEval 2.0 Score. The author should give a detailed explanation. And why not use metrics like length-controlled win rate?\n(2) Authors should compare more advanced preference optimization algorithms like ORPO and SimPO. And current results are not impressive for the alignment community.\n(3) Why did the author just include MMLU as the downstream task metric? They should incorporate more tasks (eg., arc-challenge) like the similar self-improvement work SPIN (ICML24) to better illustrate their contribution.\n(4) In the alignment area, it's better to conduct experiments in the Arena-Hard benchmark since it's a common metric to evaluate the alignment ability." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce SAIL, an efficient online RLHF approach that addresses distribution shift and reduces reliance on preference oracles for improved LLM alignment." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024sail,\ntitle={{SAIL}: Self-improving Efficient Online Alignment of Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=02kZwCo0C3},\nnote={under review}\n}" }, "abstract": { "value": "Reinforcement Learning from Human Feedback (RLHF) is a critical method for aligning large language models (LLMs) with human preferences. However, existing offline alignment approaches, such as DPO, IPO, and SLiC, rely heavily on static datasets of human preferences, often leading to suboptimal performance. Recent efforts in the literature have moved towards online RLHF methods, but they lack a unified framework and suffer from distribution shift issues. In this work, we formalize online LLM alignment as a bilevel optimization problem. 
By reducing this formulation to a more computationally efficient single-level first-order method, utilizing reward-policy equivalence, we propose SAIL (Self-improving Efficient Online Alignment).SAIL generates new samples and iteratively refines model alignment through online exploration and regulation of preference labels. This enables continuous, self-improving alignment and generalizes prior online RLHF methods as special cases. Compared to state-of-the-art RLHF methods, SAIL delivers significant performance gains, with up to 11.6\\% improvement in win rate and a 3.6-point increase in evaluation rewards, while maintaining low computational overhead." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "RLHF", "Alignment", "Online Alignment", "Self-Play" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/d5a230b3d82181e94b8d74fb961b8cc3abd38e94.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/0eb1dbe2bdc367b1d3e0552efcb28e056a4766bb.zip" }, "title": { "value": "SAIL: Self-improving Efficient Online Alignment of Large Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
03EkqSCKuO
Port-Hamiltonian Architectural Bias for Long-Range Propagation in Deep Graph Networks
main
Active
graph representation learning;long-range propagation;ordinary differential equations
learning on graphs and other geometries & topologies
5;6;8
2;3;3
2;4;4
2;2;3
3;3;3
6.333333
2.666667
3.333333
2.333333
3
0.755929
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. While the paper shows good empirical performance, it's unclear what types of problems would theoretically benefit most from a port-Hamiltonian approach versus standard message passing. Could the authors provide analysis or insights about which properties of the underlying data generation process would suggest using their method?\n\n2. The authors demonstrate that adding dissipative components often improves performance, but how does the balance between conservative (Hamiltonian) and dissipative parts relate to the nature of the task? It would be valuable to see an analysis of when pure conservation might be preferable to including dissipation, and how practitioners should choose this balance for new problems.\n\n3. Given that the method is inspired by physical Hamiltonian systems, it's surprising that there are no experiments on problems with actual Hamiltonian dynamics (e.g., molecular dynamics simulations). Such experiments could help validate whether the method's conservation properties provide advantages for physically meaningful conservation laws, beyond just improving general information flow." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "Overall, the idea of using Hamilton's dynamics for GNN is attractive, though not entirely new. \nNevertheless, the paper is solid and is more general than existing approaches. \nThe improved efficiency over previous Hamiltonian GNN approaches and the practical benefits for long-range propagation make it a useful contribution to the field.\nThe experimental results are also interesting. \n\n1. Technical soundness:\n- Clear theoretical analysis of conservation properties\n- Explicit connection between Hamiltonian dynamics and message passing\n- Thorough experimental validation\n\n2. Practical value:\n- More efficient than previous Hamiltonian GNN approaches\n- Good performance on long-range tasks\n- Can incorporate different message passing schemes\n\n3. Clarity:\n- Well-structured presentation\n- Good balance of theory and empirics\n- Clear comparisons to prior work" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces port-Hamiltonian Deep Graph Networks (PH-DGN), a new framework for graph neural networks that addresses the challenge of long-range information propagation. The approach embeds message passing within a port-Hamiltonian dynamical system framework, where node states are split into position (q) and momentum (p) components coupled through skew-symmetric matrices. The Hamiltonian function incorporates graph structure through neighborhood aggregation functions, and the port-Hamiltonian extension introduces learnable dissipative components (internal dampening and external forces) that allow the network to balance conservative information preservation with task-dependent information modification. 
The authors provide theoretical analysis of the framework's properties, including bounds on sensitivity and gradient behavior, and demonstrate empirically that their method outperforms existing approaches on several tasks requiring long-range information propagation, including synthetic graph property prediction tasks and real-world molecular property prediction. The framework can incorporate different message passing schemes and provides improved efficiency compared to previous Hamiltonian-based graph neural networks, with experimental results showing that while the purely conservative version performs well, including the dissipative components often leads to better task performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Novelty is incremental:\n- Builds on existing ideas (Hamiltonian GNNs, message passing)\n- Main contribution is combining these effectively rather than fundamentally new concepts\n\n2. Technical questions:\n- The derivation of the discretization scheme could use more justification\n- Some assumptions about the structure of $W$ and $V$ matrices for explicit updates feel restrictive\n- Could better explain why port-Hamiltonian framework is more appropriate than simpler alternatives\n\n3. Empirical:\n- Some ablation studies could be stronger (e.g., analyzing impact of different dissipative terms)\n- Could better justify hyperparameter choices" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N.A." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "**Questions**\n\n- l.161: What is the definition of anti-derivative of an activation function $\\sigma$?\n- l.223, l.228: Which norm is used in $\\| \\partial \\mathbf{b}_u(T) / \\partial \\mathbf{b}_u(T-t) \\|$? \n- l.259: *Therefore, [...]. , it holds the capability of PH-GDN to perform long-range propagation effectively*: the authors compare the upper bounds of sensitivity for the existing model (Theorem 2.3) and the proposed model (Theorem 2.4), then argue that since the latter is larger than the former, the proposed model is more effective in performing long-range propagation. However, it is difficult to claim so because there is the possibility that Theorem 2.4 only shows a looser bound than Theorem 2.3, which does not exclude the possibility that the upper bound does not properly reflect the sensitivity of the PH-GDN. It should be shown theoretically or experimentally that this upper bound adequately reflects the sensitivity of the PH-GDN.\n- l.1129: I want to clarify the setup of the Graph Transfer Task: If I understand correctly, the feature vectors of all nodes are 1-dimensional and randomly sampled from $\\mathrm{Unif}([0, 0.5))$. The target value is 0 for the source node and 1 for the target node. Assuming that this problem setup is correct, I need help understanding why this task is solvable because the model cannot distinguish source or target nodes from other nodes from feature vectors.\n\n\n**Minor Comments**\n\n- l.196: *non-dissipative (i.e., long-range)*: I think this is a slightly misleading expression. My understanding is that this paper uses non-dissipative in the sense of energy-perserving. Although non-dissipative implies long-range propagation (Theorem 2.3), they are not equivalent. 
In fact, this paper argues that non-dissipative PH-DGN performs better than conservative PH-DGN in predicting the LRGB dataset in some numerical experiments.\n- l.436: The use of position encoding is only mentioned in the caption of Table 2 and should be explicitly stated in the text, specifically in the setup of Section 3.4.\n- l.436: The correspondence between Table 2 and Table 5 needs to be clarified. For example, the method titled *MPNNs* in Table 2 is titled *re-evaluated* in Table 5. However, GCNII is not listed as *re-evaluated*, but as *MPNNs*. Furthermore, it is difficult to tell from the captions of Table 5 whether the authors test each listed method by themselves or is a citation of existing research. The reference should be indicated for each method in Table 5, for example, by adding a column such as *reference* to Table 5.\n- l.443: *Overall, our port-Hamiltonian framework [...] shows great benefit [...] without requiring additional strategies such as global position encoding, global attention mechanism, or rewiring techniques [...].*: I suggest clarifying how the usefulness of each strategy is shown by comparing it with existing methods. More specifically:\n - The superiority of the proposed method over global position encoding is justified by the comparison with MPNN-based models using position encoding.   \n - The superiority over the global attention mechanism is by comparison with the Transformer-based method.\n - The superiority of rewiring methods by comparison with Drew.\n- l.1153: The reference to Adam (Kingma & Ba, 2015) should be cited.\n- l.1343: n. layers -> the number of layers\n- l.1353: Table 6 claims that PH-DGN achieves both Hamiltonian conservation and Learnable driving forces. However, I think this is misleading because to achieve Hamiltonian conservation, the dissipative component must be removed, resulting in $\\mathrm{PH-DGN}_{\\mathrm{C}}$. It is not possible to achieve both characteristics at the same time in one architecture. 
Instead, two variants that achieve only one of the two are proposed." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The background knowledge of the port-Hamiltonian system is explained carefully, increasing accessibility for readers unfamiliar with this topic.\n- For conservative PH-DGN, the ability of long-range interaction is theoretically shown by giving the lower bounds of sensitivity.\n- The validity of the proposed methods for synthesis datasets is properly demonstrated." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes PH-DGN, a new GNN based on the port-Hamiltonian system to develop GNN that can solve graph learning tasks that require long-range dependencies. Two variants of PH-DGN are proposed: a conservative PH-DGN based on the Hamiltonian system and a PH-DGN based on the port-Hamiltonian system by introducing learnable dissipative terms. \nThe theoretical analyses show that the conservative PH-DGN is stable and energy-preserving as a dynamical system, and derive a lower bound for the sensitivity, implying the possibility of long-range interaction.\nNumerical experiments show that the conservative PH-DGN is energy-preserving without gradient vanishing using synthesis datasets (Section 3.1) and that the long-range interaction can be achieved in graph learning tasks that require long-range interactions (Sections 3.2, 3.3). Also, the usefulness of two variants of PH-DGN is evaluated on long-range graph benchmarks of real datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- As the title indicates, the theme of this paper is the relationship between GNN and long-range propagation based on the port-Hamiltonian system. 
However, no theoretical guarantees on long-range propagation are given for general PH-DGNs with dissipative components.\n- The tables of experimental results could be clearer (in particular Tables 2 and 5).\n- For $\\mathrm{PH-GDN}_{\\mathrm{C}}$, which has theoretical guarantees, the prediction performance on the real dataset (long-range graph benchmark) does not outperform existing methods, and hence its practical usefulness is limited." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- How would classical GCN aggregation interact with Theorem 2.4? Can the bound be easily extended for that case?\n- In Section 3.1, you mention that the growing behavior can be controlled by regularizing the weight matrices or using normalized aggregation functions. Did you try this? How are the empirical results?\n- Have you examined the interpretability of a trained PH-DGN? In particular, do the learned non-conservative forces make sense for the associated problem?" 
}, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- There is very clear motivation to this work, and it builds nicely upon other references.\n- The proposed port-Hamiltonian approach is an original and clever way to allow for non-conservative dynamics in a graph network while still maintaining long-range message passing.\n- The theoretical results for the conservative case are strong, and the motivation and interpretation for these are presented nicely.\n- The experimental setup is superb; the care taken to ensure ease of replication is applauded. Model details are presented very clearly and choices are explained well for each step of the setup.\n- A strong suite of models are compared against, with many different competitors and different approaches. The consistently favorable results provide a great deal of strength to claims of the proposed method's performance.\n- The appendices are comprehensive for both proofs and experimental setup, and made clear many of the questions I had on an initial read." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work provides a novel methodology for the incorporation of port-Hamiltonian dynamics in graph representation learning. The central model, called a port-Hamiltonian Deep Graph Network (PH-DGN), is introduced first in a purely conservative setting using only the Hamiltonian and no non-conservative terms. Several theorems are developed to show that this conservative case leads to long-range data propagation between graph nodes, where the graph dynamics exhibit no energy loss and gradients do not vanish as the backward sensitivity matrix is bounded below. Dissipitive forces are then added to the full port-Hamiltonian model, which are two additional networks that may be trained to capture non-conservative forces. 
Several experiments follow, including a showcase of energy conservation and sensitivity to empirically verify theoretical work, and a graph transfer problem and graph property prediction problem to compare performance on benchmark tasks against other graph models in settings which require long-range information propagation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The majority of the theoretical results are developed for the conservative case. This makes sense in the context, as conservative long-range message passing is stated as a goal, but I would also be quite interested to see what could be proven for the fully general port-Hamiltonian case.\n- In the explanation of Theorem 2.3, the statement that \"the final representation of each node retains its complete past\" seems somewhat strong. While I understand that the BSM result shows the influence of the entire past history on the current state, this statement as written seems to imply something stronger, and perhaps could be made more clear.\n- The dissipitive force terms are added in some experiments to great success, but the explanations of their performance are more intuitive and are not supported by hard data in the paper. There may be a great opportunity here for visualization to support the intuitive claims.\n\nThere are two very minor typos:\n- In the first paragraph of Section 2, \"node states' in graph\" has an unnecessary apostrophe.\n- In Appendix D.3, \"on a grid of n. layers\" has an unnecessary period." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024porthamiltonian,\ntitle={Port-Hamiltonian Architectural Bias for Long-Range Propagation in Deep Graph Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=03EkqSCKuO},\nnote={under review}\n}" }, "abstract": { "value": "The dynamics of information diffusion within graphs is a critical open issue that heavily influences graph representation learning, especially when considering long-range propagation. This calls for principled approaches that control and regulate the degree of propagation and dissipation of information throughout the neural flow. Motivated by this, we introduce port-Hamiltonian Deep Graph Networks, a novel framework that models neural information flow in graphs by building on the laws of conservation of Hamiltonian dynamical systems. We reconcile under a single theoretical and practical framework both non-dissipative long-range propagation and non-conservative behaviors, introducing tools from mechanical systems to gauge the equilibrium between the two components. Our approach can be applied to general message-passing architectures, and it provides theoretical guarantees on information conservation in time. Empirical results prove the effectiveness of our port-Hamiltonian scheme in pushing simple graph convolutional architectures to state-of-the-art performance in long-range benchmarks." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "graph representation learning", "long-range propagation", "ordinary differential equations" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b592c630c57b079492b46ee2305981f0576b4905.pdf" }, "presentation": null, "primary_area": { "value": "learning on graphs and other geometries & topologies" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Port-Hamiltonian Architectural Bias for Long-Range Propagation in Deep Graph Networks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
03OkC0LKDD
The Vital Role of Gradient Clipping in Byzantine-Resilient Distributed Learning
main
Active
Byzantine resilience;distributed machine learning
optimization
3;5;6;6
4;3;5;3
1;2;2;3
2;3;4;3
3;3;3;3
5
3.75
2
3
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please focus on the weakness of Theorem 5.2 in the rebuttal. Specifically, please compare the value of $v\\epsilon_0$ with $\\zeta^2$ in Theorem 5.2 (or address the concern in some different ways). I am willing to raise the score if the concerns are properly addressed." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. This paper is generally well-written. \n2. The idea of adaptive clipping intuitively makes sense and has an excellent empirical performance in the experiments of this work.\n3. Byzantine resilience in distributed learning is an important and timely topic." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel strategy called Adaptive Robust Clipping (ARC) for Byzantine-resilient distributed machine learning. Empirical results show that using ARC can significantly enhance Byzantine resilience compared to the methods without clipping. Theoretical analysis of convergence is also provided to show the effect of ARC." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Although the proposed ARC strategy is generally not hard to implement and has a good empirical performance, there are major concerns about the theoretical analysis in this paper, which I specify point by point below. \n\n1. The theoretical results in section 3 show that $F\\circ ARC$ is $(f,3\\kappa)$-robust when $F$ is $(f,\\kappa)$-robust (Theorem 3.2). Although the property of $ARC$ is much better than trivial clipping (as shown in Lemma 3.1), the convergence guarantee obtained from Theorem 3.2 for $F\\circ ARC$ is worse than that for $F$ (without $ARC$). In other words, the theoretical results in section 3 show that $ARC$ has a better property than trivial clipping, but do not show that using $ARC$ can improve the convergence guarantees.\n\n2. The improvement of convergence guarantees for $ARC$ is mainly shown by Theorem 5.2. Theorem 5.2 says that when the maximum gradient norm of the initial point is not larger than $\\zeta$, using $ARC\\circ F$ can guarantee to find a point $\\hat{\\theta}$ such that the square norm of the gradient at $\\hat{\\theta}$ is not larger than $v \\epsilon_0$ in expectation. However, $v \\epsilon_0$ can be much larger than $\\zeta^2$ (which is specified in the next paragraph). Briefly speaking, the result of Theorem 5.2 can be even weaker than the conditions, which makes the theorem meaningless. \n- Since $\\xi \\leq \\min(\\frac{v}{\\Phi(G,B,\\rho)},\\xi_0)$, it is obtained that $\\xi \\leq \\frac{v}{\\Phi(G,B,\\rho)}$, and thus $v\\geq \\xi \\cdot \\Phi(G,B,\\rho)=\\xi\\cdot 640(1+\\frac{1}{B^2})^2(1+\\frac{B^2\\rho^2}{G^2}).$ Therefore, \n$v\\epsilon_0 \\geq [\\xi\\cdot 640(1+\\frac{1}{B^2})^2(1+\\frac{B^2\\rho^2}{G^2})]\\cdot[\\frac{1}{4}\\cdot\\frac{G^2(f/n)}{1-(2+B^2)(f/n)}].$ The term $\\rho^2=\\exp (2\\frac{(2+B^2)\\Delta_0}{(1-\\xi_0)G^2}L)\\zeta^2$ can be much larger than $\\zeta^2$. 
Thus, $v\\epsilon_0$ can be much larger than $\\zeta^2$, which will make Theorem 5.2 meaningless.\n\nOverall, the idea of ARC is interesting. The ARC method is easy to implement and has a good empirical performance. However, the theoretical analysis in the current version does not show the improvement of convergence guarantees, and can be misleading." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* How does NNM contribute to achieving the guarantees outlined in Theorem 5.2? Is it possible to attain similar results on robust aggregators without incorporating NNM?\n* Using a fixed clipping threshold can often be effective in homogeneous Byzantine settings. How does the adaptive approach perform compared to a fixed threshold in such cases?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper proposes an adaptive method that maintains the robustness guarantees of the aggregators it employs while improving their practical performance, especially under high heterogeneity. The authors provide valuable insights into selecting the clipping threshold, demonstrating that a fixed threshold for all workers, commonly used in practice, may be inefficient in some cases and does not meet robust criteria. 
They also emphasize the gap between Byzantine theory and practical applications, highlighting that existing theory may not fully capture real-world performance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces an adaptive gradient clipping method applied to worker outputs before passing them through a robust aggregator in heterogeneous synchronous Byzantine settings. The authors address a practical issue, as they observe that while fixed gradient clipping can enhance performance in some cases, it may also impair it in others. To ensure robustness while utilizing gradient clipping, they propose an adaptive strategy that adjusts the clipping threshold according to the magnitude of worker outputs, applying clipping selectively to only a subset of them. Experimental results across various Byzantine scenarios and robust aggregators, tested on MNIST and CIFAR-10 datasets, demonstrate the effectiveness of this adaptive approach when combined with the established NNM method. The authors further support their method with theoretical guarantees." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Considering the critical role that numerical evaluation plays in supporting the paper’s claims,\n* The paper introduces an adaptive clipping approach designed to work with any robust aggregator independently of NNM. However, the numerical results primarily showcase its effectiveness only when combined with the NNM aggregator (and it is unclear if NNM was also used in Figure 6; if so, this single example may be insufficient). Since NNM has a computational complexity of $O(dn^2)$, it would be valuable to assess the performance of this approach with other robust aggregators (without integrating NNM) to explore potentially lower computational costs. 
For instance, the CWTM ($O(dn \\log{n})$) or the $\\epsilon$-approximation GM ($O(dn + d\\epsilon^{-2})$) might offer alternatives that may retain robustness in practice while reducing time complexity. Conducting such experiments could provide a more comprehensive evaluation and emphasize the approach’s practicality.\n* The CIFAR-10 evaluation is somewhat limited, with only one Byzantine worker out of 17. Expanding the evaluation to include a higher proportion of Byzantine workers and testing on more complex datasets could better demonstrate the method’s effectiveness in more practical scenarios." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See section before. \n\nIn addition:\n\n-In Figure 1, C=2 for static clipping (SC) is too small. I think that is why you have a very bad performance of SC. You need to test with bigger values of C as well for SC.\n\n-Can you report the Adaptive C of ARC over steps in these plots?\n\n- Line 94, can you justify why one needs to clip this exact k number gradients? 
what happens if one clip less or more than this number of gradients?\n\n-The intuitive k is the number of potential malicious workers which is f?\n\n-Line 238, you require \\kappa B^2 < 1, (which means B should be small) can you give an example of loss where B is small?\n\n-Line 264, I disagree with the comment that \"ARC does not introduce a significant overhead\" especially in the case of large models. It will be good to have some experiments with runtime on the x-axis \n\n-ARC theory requires that the model is well-initialized, it will be good to assess numerically the impact of the initialization on the performance of ARC" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The main strengths are: \n\n-The authors propose Adaptive Robust Clipping (ARC), a new mechanism to enhance robustness in adversarial settings.\n\n-The authors show that ARC almost retains the theoretical robustness guarantees of existing Robust methods while enhancing their practical performance. \n\n-The authors validate ARC through several experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores enhancing the robustness of distributed machine learning in the presence of Byzantine clients. The authors propose Adaptive Robust Clipping (ARC) that improves the robustness of Robust-DGD beyond traditional static clipping techniques. ARC dynamically adjusts clipping thresholds based on gradient magnitudes, allowing it to better counteract adversarial impacts.\nThe authors provide experiments to demonstrate that ARC improves model robustness against various attacks compared to static clipping methods as well as theory showing that ARC has a similar convergence rate as the classical ones known in the literature." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main weaknesses are:\n\n-Increased complexity produced by ARC in practical implementation\n\n-ARC performance depends on good model initialization which may degrade the performance in the case of poor initialization.\nDid you try some experiments to assess this?\n\n-While ARC improves robustness by adaptively clipping gradients, its thresholding could risk clipping too aggressively in certain settings, potentially discarding useful gradient information." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) Why were no experiments ran using static clipping?\n2) What is the reason for only using a network with 10 agents? Typically, networks with more agents that test for heterogeneity have a harder time than those with networks because there is a wider gap on intra and inter-class datasets.\n3) What was the reason for selecting 17 agents for the simulations in the section \"Improved robustness on CIFAR-10\"? Can you expand to more agents?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper introduces dynamic gradient clipping as an improvement to its static counterpart. 
More importantly it proves that the static approach is not robust, in accordance with the definition of robustness given in the paper. Furthermore, the authors prove the robustness of their approach and provide empirical evidence of the utility of their algorithm." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a variation on the static clipping technique used to overcome byzantine attacks in federated learning. Their algorithm makes the process dynamic by adapting the clipping value to the input gradients themselves; this algorithm, called ARC, or Adaptive Robust Clipping, is proved to be robust: (f, 3k)-robust. More importantly, the authors prove that static clipping breaks the standard (f,k)-robustness, which highlights the shortcomings of the empirical results demonstrated in the papers highlighted in paragraph five of the Introduction. These reasons motivate the need for a dynamic approach to gradient clipping. ARC was paired with and tested using the following techniques: coordinate-wise trimmed mean, geometric median, multi-Krum, and nearest-neighbor mixing. Various simulations were ran by the authors to show the utility of ARC, these include: simulations on varying degrees of data heterogeneity, simulations on varying f (the number of malicious agents), simulations showing the improved breakdown point. All of these simulations show how ARC can provide robustness.\n\nA current problem point is that the authors perform simulations and demonstrate against not using gradient clipping. In paragraph 5 of the Introduction, the authors clearly state that static methods are a problem that their approach, ARC, solves. Then, the authors proceed to perform simulations and do not compare their results against static clipping, but compare against no clipping. It is known, and evidenced by the cited work, that not clipping is a problem that is overcome by using some form of clipping. 
Therefore, results compared against not clipping yields no additional information. While the authors have shown that ARC has obvious utility that could help overcome known issues, readers cannot determine the excess utility over using static clipping. While I believe the paper holds merit, as the empirical results show, I do not believe the paper can proceed without the authors running the experiments again and showing the results with static gradient clipping. The comparison between static and dynamic clipping is the fundamental point of the paper and not having a comparison of the two makes the paper unqualified to proceed. If the authors can show those results, so readers, such as myself, can see how much improvement is gained by using a dynamic choice for clipping, then I believe the paper will contain enough merit to be accepted and to receive a higher score.\n\nAs a final, syntactic, comment, I believe the authors should move the Related Works section to an earlier spot in the paper so readers can more easily understand the historical context and how the motivation for the novel work. This swap will increase flow of understanding for the reader who will have to exert less mental effort to juggle the chronological and intellectual pieces together." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The major weakness of the paper, which is a critical one, is the complete lack of comparison of ARC versus static clipping. The authors run experiments against not using clipping; this is rendered moot by prior work and therefore is not a necessary point of comparison. The authors must go back and run the same experiments they ran with with static gradient clipping and plot that against their dynamic approach. Without doing this, it is not possible to determine the benefit their work over prior work." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024the,\ntitle={The Vital Role of Gradient Clipping in Byzantine-Resilient Distributed Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=03OkC0LKDD},\nnote={under review}\n}" }, "abstract": { "value": "Byzantine-resilient distributed machine learning seeks to achieve robust learning performance in the presence of misbehaving or adversarial workers.\nWhile state-of-the-art (SOTA) robust distributed gradient descent (Robust-DGD) methods were\nproven theoretically optimal, their empirical success has often relied on pre-aggregation gradient clipping.\nHowever, the currently considered static\nclipping strategy \nexhibits mixed results: improving robustness against some attacks while being ineffective or detrimental against others.\nWe address this gap by \nproposing a principled adaptive clipping strategy, termed Adaptive Robust Clipping (ARC).\nWe show that ARC consistently enhances the empirical robustness of SOTA Robust-DGD methods, while preserving the theoretical robustness guarantees. \nOur analysis shows that ARC provably improves the asymptotic convergence guarantee of Robust-DGD in the case when the model is well-initialized.\nWe validate this theoretical insight through an exhaustive set of experiments on benchmark image classification tasks.\nWe observe that the improvement induced by ARC is more pronounced in highly heterogeneous and adversarial settings." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Byzantine resilience", "distributed machine learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1edff0639d6d0d7f61b9422fb9b09ffc1b65c0b2.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/3789db05085a506e826567c48ddd96568a104516.zip" }, "title": { "value": "The Vital Role of Gradient Clipping in Byzantine-Resilient Distributed Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
03u7pbpyeN
BEATS: Optimizing LLM Mathematical Capabilities with BackVerify and Adaptive Disambiguate based Efficient Tree Search
main
Withdraw
Large Language Models;Tree Search;Back Verification
applications to computer vision, audio, language, and other modalities
Linzhuang Sun;Hao Liang;Jingxuan Wei;Bihui Yu;Conghui He;Zenan Zhou;Wentao Zhang
~Linzhuang_Sun1;~Hao_Liang7;~Jingxuan_Wei1;~Bihui_Yu1;~Conghui_He2;~Zenan_Zhou1;~Wentao_Zhang1
3;3;5;6
5;3;4;4
3;3;4;3
1;2;2;2
4;2;3;3
4.25
4
3.25
1.75
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Could the authors provide more details on the computational trade-offs involved?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The proposed method showcases two key features—*disambiguation* and *back-verification*—that notably enhance the model's reasoning process, as confirmed by the ablation study. *Disambiguation* helps clarify problem statements at each reasoning step, reducing the likelihood of misinterpretation, while *back-verification* provides a robust mechanism to cross-check each solution against previous steps. Together, these techniques improve benchmark performance by a substantial margin." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents BEATS, a framework that enhances mathematical problem-solving in language models by introducing targeted prompting strategies that guide the model through a step-by-step approach to decompose complex problems. Furthermore, BEATS incorporates a tree search mechanism, enabling exploration of each decision step individually, which helps refine solutions iteratively. The experiments demonstrate a significant performance increase on standard benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper combines existing approaches, such as tree search and reflective reasoning techniques, but falls short of introducing transformative new methods. While effective, the design lacks substantial innovation in handling complex reasoning beyond prior approaches.\n \n- A significant issue lies in the increased computational cost introduced by the extra steps, including disambiguation and back-verification. 
Although these steps improve accuracy, their contribution to computational overhead is not quantified, making it challenging to assess the overall efficiency.\n\n- Despite mentioning computational challenges in the introduction, the paper lacks a thorough analysis of the actual cost implications. The pruning technique within tree search is minimalistic, relying on basic conditions to halt expansion rather than addressing cost at a fundamental level.\n\n- Some areas in the paper, particularly Section 2.3, contain formatting issues, such as duplicated author names." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. How do authors verify that the disambiguation prompt effectively resolves ambiguous problem statements? Although the ablation study indicates that this prompt improves final performance, a more detailed analysis is needed. For instance, do all problems correctly solved without the disambiguation prompt remain correct when it is applied?\n2. Which version of GPT-4 is used for evaluation? If the results are referenced from OpenAI papers or technical blogs, please provide the appropriate citations." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. 
The paper presents a novel approach that combines tree search with back-verification and adaptive disambiguation to enhance the mathematical reasoning capabilities of large language models (LLMs).\n2. Ablation studies are conducted to assess the impact of key components in the proposed method, focusing on the contributions of the disambiguation and back-verification modules.\n3. The pruning in the tree search effectively reduces the problem search space, improving computational efficiency." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work investigates both prompt-based and search-based methods to enhance the mathematical reasoning abilities of large language models. The authors improve traditional search-based methods by pruning the search tree using carefully crafted prompts. A disambiguation prompt clarifies the original problem, while two additional prompts guide reasoning steps and determine search termination. Different pruning strategies are tailored to each type of prompt. The authors also introduce a self-correction mechanism called back-verification, where LLMs validate answer candidates by concatenating them with the original problem. The method’s effectiveness is evaluated across 5 math reasoning benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed approach lacks substantial novelty.\n2. The selection of baselines for comparison in search-based methods is not sufficiently justified. Zhang et al. [1] use MCTS with LLaMA3 8B (which is also used in this paper) to enhance mathematical reasoning in LLMs, achieving 96.66% accuracy on GSM8K and 58.24% on MATH, which is significantly higher than the results of this approach.\n3. Although an ablation study on the BackVerify component is included, comparisons with other verification methods are lacking. 
For instance, the ReST paper [2] evaluates the impact of different verifiers on performance, but similar evaluations are absent in this work.\n4. While pruning tree search is a key contribution of the paper, there is no experimental analysis on the extent to which the pruning strategy reduces search time. Additionally, comparing the total inference time with other search-based methods is essential to substantiate the advantages of the pruning approach.\n\n**References:**\n- [1] Zhang, D., Huang, X., Zhou, D., Li, Y., & Ouyang, W. (2024). *Accessing GPT-4 level Mathematical Olympiad Solutions via Monte Carlo Tree Self-refine with LLaMa-3 8B*. arXiv preprint arXiv:2406.07394.\n- [2] Zhang, D., Zhoubian, S., Hu, Z., Yue, Y., Dong, Y., & Tang, J. (2024). *ReST-MCTS: LLM Self-Training via Process Reward Guided Tree Search*. arXiv preprint arXiv:2406.03816." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- How does the diversity and quality of the training data influence the performance of BEATS, particularly in edge cases or complex problems?" 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- BEATS uses a unique tree search strategy with pruning and back-verification to streamline reasoning paths and verify answers, improving accuracy and efficiency.\n\n- Empirical results across multiple datasets (MATH, GSM8K, SVAMP, etc.) show notable improvement over existing methods.\n\n- The inclusion of a question disambiguation component helps clarify ambiguous problems, potentially reducing error." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents BEATS, a novel approach to improving the mathematical problem-solving abilities of large language models (LLMs). It introduces a method that combines enhanced prompting, tree search with pruning, and a back-verification technique. BEATS claims significant improvements, particularly with the Qwen2-7B model, outperforming benchmarks such as GPT-4 on the MATH dataset." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- This component, though effective, adds additional steps to the inference phase, potentially affecting efficiency in real-time applications.\n\n- The paper could benefit from a more detailed discussion of the limitations of the proposed methods and potential areas for future work, such as the impact of training data on performance.\n\n- Further discussion on how the pruning limits affect accuracy vs. computation trade-off would add valuable insight." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please also refer to the weakness section.\n1. The overall framework is based on prompt engineering, which strongly relies on the capability of LLM. Can the proposed method give such significant performance improvement when dealing with Olympiad math reasoning datasets, e.g., AIME, Olympiad?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- The challenges proposed by the authors are reasonable. These challenges can inspire future research. The proposed method combines techniques that successfully alleviate the problems.\n- The experimental results are promising. The proposed method significantly\nimproves the performance of each base model compared to the comparison\nmethods.\n- This paper is well-written and organized." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the mathematical reasoning problem in aspects of prompt engineering. The authors highlight the suboptimal prompts, high costs, and ineffective verification issues, and propose a tree-search-based prompt engineering method. The experiments show that the proposed method outperforms existing methods by a margin." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The novelty of this paper is somewhat limited. For example, the back verification has already been proposed in [1]. The heuristic pruning rules, e.g., Rule (3), are also commonly used in math reasoning. Tree-based searching methods [2] are not new either.\n- The inference cost of each method should be reported. As the SFT and zero-shot methods usually require one inference, the proposed methods require multiple samplings, making the comparison unfair.\n- The experimental results require deeper discussion. For example, the authors mention an issue with \"ambiguous problem statements\" and introduce a prompt engineering method to address it. However, there is insufficient explanation of how having the LLM rewrite the problem itself resolves this issue, and there is no comparison between the original and rewritten versions to demonstrate the effectiveness of the LLM. Additionally, if the LLM can rewrite the problem on its own, why can't it directly solve the problem?\n\n[1] Large Language Models are Better Reasoners with Self-Verification. EMNLP\n(Findings) 2023: 2550-2575\n\n[2] Accessing GPT-4 level Mathematical Olympiad Solutions via Monte Carlo\nTree Self-refine with LLaMa-3 8B: A Technical Report." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@misc{\nsun2024beats,\ntitle={{BEATS}: Optimizing {LLM} Mathematical Capabilities with BackVerify and Adaptive Disambiguate based Efficient Tree Search},\nauthor={Linzhuang Sun and Hao Liang and Jingxuan Wei and Bihui Yu and Conghui He and Zenan Zhou and Wentao Zhang},\nyear={2024},\nurl={https://openreview.net/forum?id=03u7pbpyeN}\n}" }, "abstract": { "value": "Large Language Models (LLMs) have exhibited exceptional performance across a broad range of tasks and domains. 
However, they still encounter difficulties in solving mathematical problems due to the rigorous and logical nature of mathematics. Previous studies have employed techniques such as supervised fine-tuning (SFT), prompt engineering, and search-based methods to improve the mathematical problem-solving abilities of LLMs. Despite these efforts, their performance remains suboptimal and demands substantial computational resources. To address this issue, we propose a novel approach, BEATS, to enhance mathematical problem-solving abilities. Our method leverages newly designed prompts that guide the model to iteratively rewrite, advance by one step, and generate answers based on previous steps. Additionally, we introduce a new back-verification technique that uses LLMs to validate the correctness of the generated answers. Furthermore, we employ a pruning tree search to optimize search time while achieving state-of-the-art (SOTA) performance. Notably, our method improves Qwen2-7b-Instruct's score from 36.94 to 61.52 (outperforming GPT-4’s 42.5) on the MATH benchmark." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Linzhuang_Sun1", "~Hao_Liang7", "~Jingxuan_Wei1", "~Bihui_Yu1", "~Conghui_He2", "~Zenan_Zhou1", "~Wentao_Zhang1" ] }, "authors": { "value": [ "Linzhuang Sun", "Hao Liang", "Jingxuan Wei", "Bihui Yu", "Conghui He", "Zenan Zhou", "Wentao Zhang" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large Language Models", "Tree Search", "Back Verification" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "sun|beats_optimizing_llm_mathematical_capabilities_with_backverify_and_adaptive_disambiguate_based_efficient_tree_search" }, "pdf": { "value": "/pdf/de578034a5813e95799dc7ad53f56e41e6921edc.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "BEATS: Optimizing LLM Mathematical Capabilities with BackVerify and Adaptive Disambiguate based Efficient Tree Search" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
04RGjODVj3
From Rest to Action: Adaptive Weight Generation for Motor Imagery Classification from Resting-State EEG Using Hypernetworks
main
Active
Brain-Computer Interfaces (BCIs);Motor Imagery;HyperNetworks;Data driven learning;Adaptive weights
applications to neuroscience & cognitive science
1;3;3;5
5;5;5;4
1;2;2;3
1;2;2;2
1;1;1;2
3
4.75
2
1.75
1.25
-0.816497
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "The proposed model, which simply combines existing HyperNetwork and EEGNet, lacks substantial innovation. In addition, it is difficult to confirm the advantages of the proposed model from comparative experiment results as the performance improvement of the proposed method has not been consistently demonstrated across multiple datasets. That is, the proposed model achieved improved performance on the Dreyer et al. dataset in Table 1, while its performance degraded on the BCI Competition IV IIa dataset in Table 2. Furthermore, there has been no meaningful discussion about these conflicting results." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "This study tries to address the important issue of cross-user variability and BCI illiteracy issues in the MI-EEG analysis by adopting the ability of HyperNetwork to adaptive weight generation to learn user-specific representations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors proposed a HyperEEGNet architecture by combining the conventional HyperNetwork and EEGNet to address cross-user variability for MI-BCI systems. 
The authors compared the performance of the proposed HyperEEGNet with that of competing EEGNet on various publicly available MI-EEG datasets in both cross-session and cross-user conditions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "There is no substantial innovation in the proposed method combining the conventional HyperNetworks and EEGNet. \n\nThe performance improvement of the proposed method over existing EEGNet has not been consistently demonstrated across multiple datasets. That is, the proposed model achieved improved performance on the Dreyer et al. dataset, while its performance degraded on the BCI Competition IV IIa dataset. Furthermore, there has been no meaningful discussion about these conflicting results.\n\nNo comparisons were conducted with existing state-of-the-art methods that have addressed the subject variability issue." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The term \"epoch\" seems to be overloaded since they are common terms used in EEG and in machine learning but mean different things. 
I think it becomes unclear which meaning you use in the paper sometimes, for example here is the term \"epoch\" used in close proximity, although the former seems to mean a window of data and the latter seems to mean the number of times the HyperNet is trained on all batches: \n>• Motor imagery activity data in the form of an epoch is used to perform the binary class\nclassification with a forward pass on EEGNet with the generated weights from HyperNet.\n• Cross entropy loss is accumulated for a batch of 50 epochs, and backpropagation is performed only on HyperNet parameters. Adam optimiser with learning rate 1e-4 is used.\" \n\nIt would be better if the term \"epoch\" is better clarified when used. \n\n2. >\"For the dataset from Dreyer et al. (2023), the ”acquisition runs” from 33 participants are used for training and stratified 5-fold cross-validation is used to select the best model.\" \n\nWhat variables are changed to select the best model? Is it the model architecture? Are hyperparameters tuned at all?\n\n3. Although this passage is from the \"2.4.1 Cross-Session Condition\", it seems to imply that the train-test split is not split across sessions: \n>\"For the BCI IV IIa dataset, the data from all nine participants is divided into five folds with stratified cross-validation; each fold in the iteration is considered as a test set while the other set is split with an 80-20 ratio to choose the best-performing model on the validation set.\" \n\nThe original work for the BCI competition seems to imply that there are two sessions for each subject. Is there a reason that evaluation across sessions does not seem to be done in the current work?\n\n4. In Table 1, EEGNet without the HyperNet seems to have 4 to 6 times the standard deviation as EEGNet with the HyperNet. Is there an explanation for this, especially when compared to how the ratio of the standard deviation seems to be much closer to 1 for Tables 2 and 3? 
If the same test in Table 1 is run with multiple seeds and using different subjects (not just the last 9 subjects) as the test set, would we still see such high variation across multiple folds for EEGNet without the HyperNet? \n\n5. For a practical control interface during deployment, it seems unclear when the resting state would occur, which is the input used in the HyperNet. Would a separate resting state classifier have to be used? Or would some other heuristic to determine resting state be sufficient?\n\n6. What is the current state of the art in terms of performance for these two datasets for the metrics you evaluated?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is original in that they apply a HyperNet architecture to improve the generalization capabilities of an EEG motor imagery classifier. The paper evaluates inter-subject and inter-session performance, which are both important metrics for deployment of a BCI. The authors are fairly clear in how experiments are done, although I had some questions about intersession evaluation for the BCI IV IIa dataset. The work is significant in that a new method is evaluated on EEG data and shows strong generalization performance in a dataset with 42 subjects." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper aims to show the benefits of using a HyperNet architecture to improve the generalization capabilities of EEGNet for generalization given a large dataset. The authors also use data from the resting state before a trial as a novel input for motor imagery classification." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
Claims on the strength of HyperNet + EEGNet would be improved through using a more comprehensive evaluation on the Dreyer et al. dataset. Leave-N-subjects-out train-test split should be done where around a quarter of the subjects are used as test subjects each time. \n2. The HyperNet + EEGNet approach does not seem to work for the BCI IV IIa dataset (one of the two datasets tested). The authors mention that it is evidence that the method does not seem to work unless with a larger dataset. It would be better if there were another dataset that can be tested to show that the HyperNet + EEGNet approach does indeed improve classification given more than 9 subjects. Alternatively, the Dreyer et al. dataset could be evaluated while varying the number of subjects for training, e.g. 8, 16, 24, 32, etc to see if the trend of improving performance given more subjects occurs." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "What specific task-related information do you think should be included to optimize the input frequency and enhance model performance? How might this affect the model's practical deployment?\n\nWhy wasn't the optimization of resting-state EEG data representations, especially concerning brain connectivity, explored? 
What additional features do you believe are important for downstream tasks that current measures do not capture?\n\nWhat criteria will you use to evaluate the efficacy of HyperEEGNet in comparison to other transfer learning methods? Are there particular metrics or datasets that you consider essential for this assessment?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The model exhibits robust generalization capabilities, performing effectively in both Leave-Subject-Out scenarios and with larger datasets, demonstrating its ability to handle unseen subjects. Additionally, this approach promises reduced calibration time, which is essential for real-world BCI applications, thereby enhancing user-friendliness and practicality." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a new architecture called HyperEEGNet aimed at enhancing EEG-based brain-computer interface (BCI) systems. This innovation addresses issues related to lengthy calibration sessions and the limited use of resting-state EEG data in motor imagery decoding tasks. By combining HyperNetworks with the EEGNet framework, HyperEEGNet adaptively generates weights for motor imagery classification based on resting-state data. In Leave-Subject-Out scenarios using a dataset of nine participants, its performance is comparable to the baseline EEGNet. However, when applied to larger datasets with 33 participants, HyperEEGNet shows improved generalization capabilities, effectively utilizing resting-state EEG information to manage unseen subjects. The model provides strong representations in both cross-session and cross-user contexts, underscoring the value of resting-state data for tasks such as motor imagery classification. 
Additionally, the results indicate that HyperEEGNet has a smaller memory and storage footprint, making it well-suited for edge computing. This approach offers faster user calibration and enhances the practicality of real-world BCI applications, representing a significant advancement in the field." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The initial evaluations rely on a relatively small dataset comprising just nine participants, which may not adequately reflect the variability found in larger populations. This raises questions about the generalizability of the findings without access to more extensive and diverse datasets. Additionally, while the use of resting-state EEG data is a novel approach, the model's performance may be affected if the quality or relevance of this data varies among different users or sessions. Furthermore, incorporating HyperNetworks adds a layer of complexity to the training and tuning process, potentially necessitating greater computational resources and specialized knowledge for effective implementation. Lastly, like many deep learning models, HyperEEGNet may have limitations in interpretability, making it difficult to ascertain how specific features impact its classification decisions." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "What specific strategies were implemented to mitigate overfitting during training, especially given the observed risks at larger epoch sizes? How do you plan to validate the model's performance in cross-user scenarios beyond the training dataset?\n\nCould you elaborate on the implications of a steep learning curve and rapid convergence in just 50 epochs? What does this suggest about the model's capacity to capture complex patterns in the data?\n\nWhile the focus on two-class motor imagery classification is noted, what are the plans for extending this model to accommodate multiple classes or different downstream tasks? How do you envision addressing potential challenges in this expansion?\n\n How do you explain the performance variations among participants, particularly the discrepancy in accuracy for Participant ID 3? What insights can be gained from comparing the weights generated by the HyperNet with those from an EEGNet trained directly on activity data?\n\nWhy was the optimization of resting-state EEG data representations not explored, particularly regarding brain connectivity? What additional features do you think might be important for downstream tasks that are not captured by current measures?\n\nWhat criteria will you use to compare the efficacy of HyperEEGNet against other transfer learning approaches? Are there specific metrics or datasets that you consider critical for this comparison?\n\nWhat specific task-related information do you believe should be incorporated to optimize the input frequency and further enhance model performance? How will this impact the model's practical deployment?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Robust Generalization: The model demonstrates strong generalization capabilities, performing well in both Leave-Subject-Out scenarios and with larger datasets, indicating its effectiveness in handling unseen subjects.\n\nReduced Calibration Time: The approach promises faster user calibration, which is crucial for real-world BCI applications, making it more user-friendly." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a novel architecture, HyperEEGNet, to improve EEG-based brain-computer interface (BCI) systems, addressing the limitations of long calibration sessions and the underutilization of resting-state EEG data in motor imagery decoding tasks. By integrating HyperNetworks with the EEGNet architecture, HyperEEGNet adaptively generates weights for motor imagery classification based on resting-state data. In Leave-Subject-Out scenarios using a dataset with nine participants, the model performs comparably to the baseline EEGNet. However, when scaled to datasets with 33 participants, HyperEEGNet demonstrates enhanced generalization capabilities, effectively leveraging resting-state EEG information to handle unseen subjects. The model achieves robust representations in both cross-session and cross-user scenarios, highlighting the potential of resting-state data for downstream tasks like motor imagery classification. Furthermore, the findings indicate that HyperEEGNet's smaller footprint reduces memory and storage requirements, making it suitable for edge computing. This approach promises faster user calibration and improved feasibility for real-world BCI applications, advancing the field significantly." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Limited Dataset Size: The initial evaluations involve a relatively small dataset with only nine participants, which may not fully capture the variability present in broader populations. The generalizability of the findings could be questioned without larger, more diverse datasets.\n\nDependence on Resting-State Data: While leveraging resting-state EEG data is innovative, the model's effectiveness might be limited if the quality or relevance of the resting-state data varies across users or sessions.\n\nComplexity of HyperNetworks: The integration of HyperNetworks may introduce additional complexity in model training and tuning, potentially requiring more computational resources and expertise to implement effectively.\n\nInterpretability: As with many deep learning models, the interpretability of HyperEEGNet's decision-making process might be limited, making it challenging to understand how specific features influence classifications." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose an architecture that leverages resting state EEG data for generating weights via hypernetworks for downstream task like Motor Imagery" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024from,\ntitle={From Rest to Action: Adaptive Weight Generation for Motor Imagery Classification from Resting-State {EEG} Using Hypernetworks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=04RGjODVj3},\nnote={under review}\n}" }, "abstract": { "value": "Existing EEG-based brain-computer interface (BCI) systems require long calibration sessions from the intended users to train the models, limiting their use in real-world applications. 
Additionally, despite containing user-specific information and features correlating with BCI performance of a user, resting-state EEG data is underutilized, especially in motor imagery decoding tasks. To address the challenge of within and across-user generalisation, we propose a novel architecture, HyperEEGNet, which integrates HyperNetworks (HNs) with the EEGNet architecture to adaptively generate weights for motor imagery classification based on resting-state data. Our approach performs similarly in a Leave-Subject-Out scenario using a dataset with 9 participants, compared to the baseline EEGNet. When the dataset size is scaled, with 33 participants' datasets, the model demonstrates its generalisation capabilities using the information from resting state EEG data, particularly when faced with unseen subjects. Our model can learn robust representations in both cross-session and cross-user scenarios, opening a novel premise to leverage the resting state data for downstream tasks like motor imagery classification. The findings also demonstrate that such models with smaller footprints reduce memory and storage requirements for edge computing. The approach opens up avenues for faster user calibration and better feasibility of edge computing, a favourable combination to push forward the efforts to bring BCIs to real-world applications." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Brain-Computer Interfaces (BCIs)", "Motor Imagery", "HyperNetworks", "Data driven learning", "Adaptive weights" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b85b237d2a962f5758e3e5244045cb38ec1196a8.pdf" }, "presentation": null, "primary_area": { "value": "applications to neuroscience & cognitive science" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "From Rest to Action: Adaptive Weight Generation for Motor Imagery Classification from Resting-State EEG Using Hypernetworks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
04RLVxDvig
NanoMoE: Scaling Mixture of Experts to Individual Layers for Parameter-Efficient Deep Learning
main
Active
Mixture of Experts;Parameter Efficiency;Expressivity;Low-Rank Factorization
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;3;3;3
4;4;4;4
2;3;2;2
2;2;2;2
3;3;1;3
3
4
2.25
2
2.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could you elaborate on the training details, such as the learning rate and optimizer? Did you properly tune them and were the results sensitive to these choices?\n- Does NanoMoE lead to higher activation memory due to larger intermediate tensors?\n- How does NanoMoE compare with low-rank in terms of performance vs wall clock time (rather than parameter count or FLOPs) ?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- NanoMoE is a novel family of structured matrices with clear theoretical advantages over low-rank matrices in terms of expressiveness, especially in achieving higher ranks for the same parameter count.\n- The paper rigorously proves the said advantages of NanoMoE\n- Experiments support the theory." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces NanoMoE, a novel family of structured matrices that achieves superior flexibility compared to low-rank matrices with minimal increase in parameters or FLOPs. The paper theoretically proves that NanoMoE can have a significantly higher rank and is strictly more flexible than low-rank matrices for similar parameter counts. 
Some experiments confirm the improved performance of NanoMoE layers relative to low-rank layers." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Experiments are only done on toy problems such as dense matrix approximation and a small text classification dataset. Can the authors present experiments on tasks such as image classification on CIFAR-10 / ImageNet and language modeling (e.g., using the nanoGPT codebase)? Results on these benchmarks have been used in evaluating new structured matrices in recent works [1, 2].\n- There are already many equally parameter-efficient structured matrices that have the advantage of being full-rank, such as the Kronecker product, Tensor-Train decomposition, and Monarch matrices [1]. There is no comparison with these alternatives.\n- While more expressive than the usual low-rank matrix, I believe NanoMoE will require more memory to store the activations (intermediate tensors have size $K r$ rather than just $r$). Moreover, I suspect the tensor core utilization will be lower because the block diagonal matrices involve contraction with smaller ranges, resulting in worse wall clock times despite having a minimal increase in FLOPs. The authors did not discuss these potential limitations.\n- The experiment section does not provide details about how the models were trained. For example, are the learning rates well-tuned? Prior work [2, 3] has shown that structured matrices require very different learning rates than those commonly used for dense layers, making a well-tuned learning rate important for a fair comparison.\n- The paper presents the connection to MoE as a strength since it has been shown to be more compute-efficient for pre-training LLMs. But only sparse MoE models have demonstrated improved training efficiency and is what was used in referenced models such as Mixtral and Switch Transformer. 
The proposed NanoMoE, however, is not a sparse MoE model and is, therefore, unlikely to lead to similar benefits. The authors should carefully discuss this distinction.\n- Recent works have used structured matrices to build MoE in each linear layer, similar to what is proposed in this work. I suggest the authors to discuss these highly related works. [3, 4]\n\n[1] Dao et al. 2022. Monarch: Expressive Structured Matrices for Efficient and Accurate Training\n\n[2] Qiu et al. 2024. Compute Better Spent: Replacing Dense Layers with Structured Matrices\n\n[3] Potapczynski et al. 2024. Searching for Efficient Linear Layers over a Continuous Space of Structured Matrices\n\n[4] Oldfield et al. 2024. Multilinear Mixture of Experts: Scalable Expert Specialization through Factorization" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- What are the hyper-parameters used to conduct the experiments? See weaknesses section for what it’s relevant to discuss. What’s the sensitivity of NanoMoE to hyper-parameters?\n- What is the loss function used to optimise the first experiment of section 4? What is this experiment trying to show (irrespective of matching conclusions with the AG news experiment)?\n- What is the runtime of NanoMoE compared to dense matmuls either with low-rank or not? How complicated is it to run this efficiently in modern accelerators? 
Is this future work?\n- Is NanoMoE more prone to overfitting in the experiments?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The method to mix low-rank factorization intermediate outputs is interesting.\n- The paper emphasizes how performance improves as a function of FLOPs, which is important to develop scalable methods, especially for pre-training." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces NanoMoE, a building block which adds an additional mixing layer to low-rank factorization layers for linear projections. The paper draws connections to the mixing matrix from the mixture of expert literature, and characterises the space of matrices it can represent. Finally the authors test the proposed method on a synthetic task on various FLOPs budgets and on the AG news classification task." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Conceptual Framing**\n\n- The connection to the sparsely-gated mixture of experts literature is very weak. As the method is described in section 3, the matrix M performs mixing over the partitioned input x_in, this is very different from [1] which is referenced in section 1, where specific components of the network learn to route inputs to “experts”. Nanomoe rather does a sort of sparse mixing over the embedding dimension, where no sparse routing or “expertise” is learned.\n- The claims in the paper are too overreaching.\n - Mixture of expert layers and sparse layers have already been applied to individual layers in prior work, see [1] and [2] [3] for preliminary references, a more thorough literature review should be in the paper. 
This work does not scale more than previous work in terms of applying them to whole components of the network, or in the experimental setting size (which is very modest in NanoMoE).\n - Section 1 says “We formally define this problem as parameter-efficient deep learning”. There is not a formal definition of this in the paper, just a formal definition of the proposed method. Also, more related to the proposed approach, is sparse deep learning, which has a rich body of literature. The paper hardly proposes a new problem that is not already known or tackled in the deep learning literature.\n - There is no discussion about hardware efficiency other than a very loose definition of FLOPs in numpy. Real-world hardware efficiency is necessary to scale up methods as implied in the title.\n- The Monarch matrices line of work [5] [6] [7] seems very relevant to this work (it is not cited), as it deals with a more efficient building blocks with block-diagonal matrices, with detailed discussion on expressiveness, efficiency, experimental work and covering a super-set of the scope of this paper (both pre training with mixed sparse+dense, and finetuning of dense models). I highly recommend the authors to review [5] as a blueprint for this work. It’s worth a discussion of the differences between both methods, both in terms of modelling and hardware efficiency for pretraining; at the very least, this seems like an important baseline to have in the experimental section.\n\n**Experiments**\n\n- I found the first experiment from the OPT-13b layer very confusing. There is no description about what loss is being optimised, which makes it very difficult to interpret the results — the losses in Figure 2) and b) seem high but without any description it is not possible to know if any of the models is learning anything useful at all. 
Moreover, the input is random gaussians with a rather high standard deviation, again without any explanation, this task does not seem to be representative of a real training task at all.\n- The experiments compare to Low-Rank training as a baseline. However, a more important comparison to do is with a fully dense layer, which is what actually is commonly used in pre-training (which the paper advocates for in Section 1). Also, the related work section describes a number of models that would be important baselines to compare to, low-rank is arguably a simple baseline and not SOTA.\n- For the AG News classification dataset, there’s several important experimental details missing, which are crucial to understand the empirical merit of NanoMoE:\n - What is the loss being optimised?\n - What are the details of the vectorization layer? What’s the vocabulary size? How are words out of vocabulary handled?\n - How many epochs/steps occur during training?\n - What is the optimizer and what hyper-parameters are used? (batch size, learning rate, regularisation, etc)\n - How are the weights initialised in the NanoMoE layers? More generally speaking, which hyper-parameters are different in NanoMoE vs the low-rank baseline?\n - What is the granularity of the K and r ranges?\n - What is the activation function used in the experiments?\n- [7] shows that a careful parametrization is needed for structured matrices. This is a missing detail on the hyper-parameters, but also a missing discussion for NanoMoE too.\n- Figures 4) and 5) are hard to visualise with all the data points being very transparent. There is a lot of variance per Flop Budget, which probably is due to interactions of K and r. It is important to disentangle these effects as well.\n- Plotting the low envelope seems to ignore the fact that NanoMoE is overfitting at higher FLOP counts on figure 5b (if that’s not the case, the colours are making this difficult to interpret). 
Is NanoMoE more prone to overfitting at higher FLOP budgets? If it is, then the method is not very promising, it could also be a lack of proper regularisation, but this is not clear given the lack of experimental details.\n- Modern NLP solves classification problems such as AG News with unsupervised pre-training + transfer-learning (BERT-style models) or few-shot learning (GPT-style models). While large-scale pre-training is very expensive, there is work to pretrain BERT-style models in as little as 1 GPU day [4] which is more suitable to academic budgets. A *single* and small-scale experiment on this unsupervised learning setup, would be more apt to compare to modern methods in NLP (this can very well be the single best combination from the AG news experiment).\n- The definition of FLOPs seem to focus on inference considerations, as I think it computes the output of numpy.einsum_path over a single einsum operation (is not clear what operations are included in the call to enisum_path, a spelled out code snippet would be useful). However, this paper focuses as per section 1 on efficient pre-training. This calls for a definition of FLOPs per training step, which includes: forward and backward FLOPs, runtime bounds such as given in [5], and practical step time on modern accelerators. A number of these can be future work, but it needs to be disclosed explicitly in order to consider the merits of the paper.\n- All in all, I consider the experimental section to be too weak to claim this in the conclusion: “our empirical results consistently validate that NanoMoE achieves superior performance”. 
More thorough experiments need to be done before claiming this.\n\n[1] https://openreview.net/forum?id=B1ckMDqlg\n\n[2] https://proceedings.mlr.press/v162/dao22a/dao22a.pdf\n\n[3] https://openreview.net/forum?id=-b5OSCydOMe\n\n[4] https://proceedings.mlr.press/v202/geiping23a/geiping23a.pdf\n\n[5] https://proceedings.mlr.press/v162/dao22a/dao22a.pdf\n\n[6] https://openreview.net/forum?id=cB0BImqSS9&noteId=98BZANkxc8\n\n[7] https://proceedings.mlr.press/v235/qiu24f.html" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "**Questions:**\n- How were the hyperparameters chosen? Was any analysis conducted to determine optimal values, especially for selecting the dense layer in synthetic data experiments?\n- What prevented the focus from extending to multiple FFN layers? Was this due to increased complexity, as each dense layer would require a similar setup?\n- Why has NanoMoE not been tested on more complex architectures beyond single dense layers? How do the authors envision scaling it for larger models?\n- Is there a reason NanoMoE did not incorporate sparse gating mechanisms, as seen in other MoE frameworks?\n- How does NanoMoE compare with other parameter-efficient MoE-based or low-rank models in terms of accuracy and parameter reduction? Were any qualitative comparisons made?\n- Has NanoMoE been tested in transfer learning contexts? 
Would it retain its efficiency and performance when adapted to new tasks?\n\n\n**Suggestions:**\nThe theoretical foundation is strong, but more experiments are needed to assess NanoMoE's performance and complexity compared to other MoE and existing approaches. I suggest:\n- Adding performance comparisons with some existing baselines.\n- Extending the experiments to more layers beyond the embedding layer, ideally including FFN and attention layers for a thorough evaluation.\n- Compared with other MoE frameworks, NanoMoE’s structure is similar and would benefit from these benchmarks." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The strengths of this paper can be summarized as follows:\n\n- Efficiency in Resource Usage: The proposed method effectively reduces the number of parameters and computational demands, making it suitable for deployment in resource-constrained environments.\n- Maintained Performance: Despite the reduction in computational resources, the model achieves results that are competitive with more resource-intensive approaches.\n- Innovative Approach: The factorization and aggregation technique offers a fresh perspective on optimizing neural network architectures." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel variant of the mixture of experts model aimed at reducing the number of parameters and floating-point operations (FLOPs) in neural networks. This is achieved by factorizing individual feedforward layers and their corresponding input feature maps into smaller chunks, and then aggregating their outputs. 
By applying this operation to dense layers, the method significantly reduces parameter count and memory requirements while maintaining competitive performance levels\n\nThe paper’s main contributions include: introducing NanoMoE, a parameter-efficient block family with three complexity levels (NanoMoE-I, II, and III); proving NanoMoE’s higher expressivity over low-rank factorization with minimal parameter increase; and validating through experiments that NanoMoE achieves better model quality than low-rank factorization at similar resource budgets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Although the idea is interesting, the proposed method has several major weaknesses:\n\n- **Lack of Inference Speed Evaluation**: While the main objective is to reduce computational cost and memory footprint, the experiments focus primarily on parameter reduction. There is no discussion of whether the method improves inference speed, which is critical for assessing practical efficiency gains.\n\n- **Limited Experimental Scope**: The authors conduct only two experiments on a single dense layer or a simple model, making it difficult to assess the method’s feasibility for real-world deployment and its performance in more complex scenarios.\n\n- **Narrow Evaluation Metrics**: The evaluation is limited to loss reduction without considering classification accuracy, which would be valuable for classification tasks. Including transfer learning experiments would further help to gauge the method’s effectiveness across tasks.\n\n- **Absence of Baseline Comparison**: The approach of weight partitioning and non-gated mixtures of experts is not new[1]. 
Comparisons with existing methods that use similar techniques, such as [1] focusing on parameter reduction, would provide clearer insights into the proposed method’s relative performance and innovation.\n\n[1] Scaling Laws for Fine-Grained Mixture of Experts" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "What’s the loss function in the synthetic dataset experiments where you are sampling i.i.d gaussian random vectors of dimension 20480? The text mentions that it’s testing the FC layer from OPT-13B, which is a language model. It’s not clear to me what’s the training objective function here." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The idea is quite relevant to lots of on-going works that replace dense matrices with different structure matrices for improved performance. I think it’s a nice addition to the community.\n- The paper is well-written and easy to follow" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose to extend low-rank approximation of standard neural network weight matrices of the form W=UV into W = blockdiag(U) M blockdiag(V) where blockdiag(U) is a block diagonal reshaping of the original matrix U. 
M is a block matrix interpreted as expert weights for each possible combinations of subblocks from blockdiag(U) and blockdiag(V). The M matrix is parametrized in three different ways (scalar times identity, diagonal, diagonal plus outer product) with increasing expressivity proved theoretically but also increasing computational cost. The authors empirically validate that the proposed approach is better than low-rank in terms of train/test loss for 1) a synthetic data setting when controlling parameters and FLOPs, 2) AG news classification when controlling for parameters and FLOPs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The primary problem with all of the empirical evaluations in this paper is that they are not informative about whether the proposed approach is actually a good replacement for standard MoE layers or not. The baseline is just low-rank, which is shown to be less expressive already compared to the proposed nanoMoE. It’s essential to compare to a standard MoE where you’re not using any low-rank structure but with just dense matrices. \n\nIt’s also not surprising that when controlling for parameters, nanoMoE is performing better than low-rank since the parameter overhead introduced by K and r are relatively small (the authors sweeped over small values of K). \n\nI’m willing to change my scores if the authors add the dense matrix $W\\in\\mathbb{R}^{d_2\\times d_1}$ baseline and the standard MoE with dense matrices baseline, at least in a limited setting if the compute budget is a problem during rebuttal." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024nanomoe,\ntitle={NanoMoE: Scaling Mixture of Experts to Individual Layers for Parameter-Efficient Deep Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=04RLVxDvig},\nnote={under review}\n}" }, "abstract": { "value": "Large language models (LLMs) have achieved remarkable success, but their growing size leads to significant challenges in efficiency and cost. This work explores parameter-efficient deep learning, aiming to achieve comparable performance with fewer parameters and floating-point operations (FLOPs). We introduce NanoMoE, a novel family of parameter-efficient building blocks inspired by the Mixture of Experts (MoE) framework. NanoMoE offers a modular and efficient replacement for fully connected layers within traditional neural networks. We instantiate NanoMoE with three variants of increasing complexity and theoretically demonstrate its superior expressivity compared to low-rank factorization with minimal parameter increase. Empirical results validate that NanoMoE achieves superior model quality compared to low-rank factorization under the same parameter or FLOP budget, confirming its enhanced efficiency." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Mixture of Experts", "Parameter Efficiency", "Expressivity", "Low-Rank Factorization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4aa38d228c8912d3aa307666a911d282faf7ee53.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "NanoMoE: Scaling Mixture of Experts to Individual Layers for Parameter-Efficient Deep Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
04TRw4pYSV
Dual-Modality Guided Prompt for Continual Learning of Large Multimodal Models
main
Active
Continual learning;Large multimodal models;Efficient learning;Prompt learning
transfer learning, meta learning, and lifelong learning
3;3;3;5
4;3;4;3
3;2;2;2
2;2;2;2
1;3;1;3
3.5
3.5
2.25
2
2
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Prompt-based continual learning methods like L2P[1], DualPrompt[2], S-Prompts[3] and HiDe-Prompt[4] employ various prompt design and selection strategies. As for the prompt design, how does this paper demonstrate the superiority of the proposed method?\n2. Is there a writing error in Equation 12? This loss aims to increase the similarity between $x^t_P$ and $x_{instruct}$; however, as $x^t_P$ and $x_{instruct}$ become more similar, it means the prompt cannot provide additional information, which would be detrimental to prompt learning.\n\n[1] Wang Z, Zhang Z, Lee C Y, et al. Learning to prompt for continual learning[C]//Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2022: 139-149.\n\n[2] Wang Z, Zhang Z, Ebrahimi S, et al. Dualprompt: Complementary prompting for rehearsal-free continual learning[C]//European Conference on Computer Vision. Cham: Springer Nature Switzerland, 2022: 631-648.\n\n[3] Wang Y, Huang Z, Hong X. S-prompts learning with pre-trained transformers: An occam’s razor for domain incremental learning[J]. Advances in Neural Information Processing Systems, 2022, 35: 5682-5695.\n\n[4] Wang L, Xie J, Zhang X, et al. Hierarchical decomposition of prompt-based continual learning: Rethinking obscured sub-optimality[J]. Advances in Neural Information Processing Systems, 2024, 36." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper is well written and is the first prompt learning framework for rehearsal-free continual learning of LMMs. The experimental results show a significant improvement, with comparisons conducted across various tasks and datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a dual-modality guided prompt learning framework (ModalPrompt) tailored for multimodal continual learning to effectively leran new tasks while alleviating forgetting of previous knowledge. Extensive experiments demonstrate the superiority of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed method lacks substantial novelty, as prompt learning has already been widely used in fine-tuning pre-trained vision-language models in the continual learning setting.\n2. The baseline is too weak, thus the effectiveness of the method is not very convincing. For example, the baseline accuracy of zero-shot on the REC task is 0.00." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- I will ask the authors to revise the entire paper to clarify their method and arguments.\n- In the main text the authors repeatedly emphasize that their method is time-efficient in the sense that the time complexity of inference depends on the number of selected prompts rather than tasks. However, I find this unclear. First, during the inference for each task sample, one needs to compute the similarity with all the prompts, whose number equals to the number of tasks. If we disregard such selection computation, why should other methods exhibit an $O(N_{task})$ time complexity?\n- To illustrate the importance of the dual-modality guidance, the authors compared the full results with those from using only image or text modalities. This comparison could be biased, as it relies solely on $\\alpha$ or $\\beta$ for prompts selection in the latter case. To ensure fairness, for example, one could use two different text encoders to obtain two estimates of text-based similarities $\\beta$ and $\\beta'$. This allows for a comparison of results using $\\alpha + \\beta$ with those using $\\beta + \\beta'$. Can you carry out this comparison and show the results?\n- There seems to be a discrepancy between results in Fig.5 and Fig.6: GQA task features show their highest similarity with ImageNet prototype features (Fig. 5). yet the selected prototype prompts are primarily from the GQA task (Fig. 6)." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "In large models like LLMs and LMMs, learned prompts serve as new \"viewpoints\" that enhance the performance of the underlying LMMs on specific tasks. I believe exploring prompt-based \"continued learning\" techniques can be practically beneficial, especially with the availability of powerful LMMs." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a continual learning scheme for LMMs based on prompt selection and fusion. Experiments on eight datasets show the effectiveness of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper is difficult to read, as it presents simple ideas in an abstract and complex manner. It requires a substantial revision before one can properly evaluate its soundness and contribution.Thus, I do not believe it is ready for publication at ICLR. Here are some areas of confusion I encountered:\n- In line 161, it states “The characteristic of LMM continual learning includes: …” It is unclear whether the authors refer to a general consensus on LMM continual learning or their specific proposals.\n- The summation in Eq.(3) lacks a dummy variable. Are you summing over individual prompts within a set for a specific task $t$?\n - Consider using $\\bar{x}$ for the average of prompts, as bold symbols could be confusing since they typically represent vectors.\n- In line 201, the projection should be defined as $\\text{Proj}_v(\\cdot):\\mathbb{R}^{d_v}\\rightarrow\\mathbb{R}^{d_t}$.\n- In Eq.(7), What is $X_p$? Is it the collection of all prompts? 
It's unclear how prompts are selected in your process.\n - One possible understanding: You have $N$ prompts for each of the $T$ tasks, so $T\\times N$ in total. The selection is performed over all the $T\\times N$ and produce $k$ most relevant ones.\n- Line 242 states, “To enhance knowledge transfer, the dual-modality features could serve as guiding cues for prompts to accurately get close to multimodal distributions of current task in feature space.” What are the dual-modality features? Are they the features of the current task? What do you mean by “multimodal distributions”? I don't think those terminologies are self-explanatory and commonly used in the field. Why is the closeness to the distribution helpful in enhancing knowledge transfer?\n- Eq.(9) abuses the symbol $\\mathbf{x}^t_p$ for prototype features, the same term is used for the “prompt features” in Eq.(3).\n- In Eq.(10) what are the definitions of $\\alpha^{\\le t}$ and $\\beta^{\\le t}$? What is the shape of $\\tilde{X}^t_p$?\n- In line 265, where do you define the parameters $\\theta_p^t$ of prototype prompts? \n- In Table 1, what is the metric of the first two methods?\n- In Table 2, what do $B_i$ and $M_i$ represent in the second row?\n- Previous text implies that “number of selection prompts $k$” refers to selecting the top-k most similar prompts. However, by line 448-455, it seems $k$ refers to the number of sets of prototype prompts. Which is the correct understanding?\n- Line 456 is confusing when it mentions “choosing three sets of prototype prompts.” Based on subsection 3.2 (line 237, “we term set of prompt for each task as prototype prompts”), shouldn’t the number of prototype prompt sets match the number of tasks, which is eight?\n- In Fig.5, it is not clear what quantity is plotted. 
Is it the average similarity between the prototype features and task features across all in task samples and targeting prototypes?\n\nIn addition, the visualization subsection at P.10 provides little information. Cherry-picking examples do not represent the overall behavior of your model. and I don't understand how these examples support the claim that your model retains previously learned knowledge." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.\tThere are numerous methods for multimodal prompt learning. Did the authors explore other approaches, and if so, how effective were they? \n2.\tAdditionally, why does the baseline comparison only include the LoRA method? Are there other fine-tuning methods considered? Could a direct comparison between LoRA and prompt learning be potentially unfair? \n3.\tIs there any comparison of FPS, storage, and speed?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tIntroduces an innovative, data-efficient solution to catastrophic forgetting, critical for LMM applications in dynamic task environments.\n2.\tDemonstrates strong empirical performance with improvements across key continual learning metrics.\n3.\tEfficient design enables lower computational cost, making it scalable for broader application." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents MODALPROMPT, a dual-modality guided prompt framework designed to address catastrophic forgetting in large multimodal models (LMMs) during continual learning. LMMs, which integrate visual and textual processing capabilities, encounter performance degradation when sequentially learning new tasks. To address this, MODALPROMPT leverages dual-modality (image and text) prompt learning to enable continual learning without task-specific expansion or data replay, which can be resource-intensive and raise privacy issues. By combining task-specific prototype prompts with a selection mechanism informed by image-text distributions, the model achieves improved task retention and transfer of knowledge across a variety of multimodal benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThe baseline lacks a comparison with other prompt learning methods.\n2.\tComplexity in configuring prompt numbers and selection features may limit broader accessibility without further simplification or automation.\n3.\tModalPrompt needs to convincingly differentiate itself from prior work in prompt-based continual learning, likely through robust comparative experiments and ablations." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "See weakness." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The question investigated in this paper is critical and significant in the current deep learning community.\n2. The paper proposes a novel prompt learning framework for rehearsal-free continual learning of LMMs.\n3. They conduct extensive experiments to demonstrate the effectiveness and inference speed of proposed methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores continual learning in Large Multimodal Models, focusing on the challenge of enabling models to continuously learn across sequential tasks. The authors critically assess the limitations of existing approaches and propose a novel dual-modality guided prompt learning framework for multimodal continual learning. Extensive experiments show that the proposed method significantly enhances both performance and inference speed." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
Although the experiment improves the performance and inference speed, the proposed method involves modality-specific prompts for each task, which is too simple compared to existing work that devises advanced prompt strategies in visual scenarios. Simultaneously, they lack of comparison with the amount of prompt-based methods. Such as: DualPrompt [1], L2P[2], CODA-Prompt[3].\n2. There exist some typos in the paper:\n 1. in line 100, `prpredominant'.\n 2. in line 128, ... set `prompt of prompts' ... \n3. The author proposes the setting of refrains from computation expansion in proportion to the number of tasks. Whether means we can continuously learn the sequential data in one model and the performance will continuously improve. In other words, how many tasks can the proposed method effectively handle within one model?\n4. In the experiment, there is a lack of results that compare one task in the continuous process, i.e. compare the performance at the time axes, which directly reflects the transfer capability when more previous knowledge is learned.\n5. There is no difference between the two items in equation 12 with the add operation.\n6. How does the proposed method assess forgetting? Does it require saving a lightweight projection layer for each task, or should the projection layer from a previous task be re-tunned after learning a new one?\n7. In Line 203, why does the encoder of visual E_I and textual E_T in CLIP realize the mapping of E_I(·) : R^{n_v ×d_v} →R^{d_v} ,E_T(·) : R^{n_t ×d_t} →R^{d^t}, which should exist error description?\n\n[1]. DualPrompt: Complementary Prompting for Rehearsal-free Continual Learning\n\n[2]. Learning to Prompt for Continual Learning\n\n[3]. 
CODA-Prompt: COntinual Decomposed Attention-based Prompting for Rehearsal-Free Continual Learning" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "a novel prompt learning framework for continual learning of large multimodal models" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024dualmodality,\ntitle={Dual-Modality Guided Prompt for Continual Learning of Large Multimodal Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=04TRw4pYSV},\nnote={under review}\n}" }, "abstract": { "value": "Large Multimodal Models (LMMs) exhibit remarkable multi-tasking ability by learning mixed datasets jointly. However, novel tasks would be encountered sequentially in dynamic world, and continually fine-tuning LMMs often leads to performance degrades. To handle the challenges of catastrophic forgetting, existing methods leverage data replay or model expansion, both of which are not specially developed for LMMs and have their inherent limitations. In this paper, we propose a novel dual-modality guided prompt learning framework (ModalPrompt) tailored for multimodal continual learning to effectively learn new tasks while alleviating forgetting of previous knowledge. Concretely, we learn prototype prompts for each task and exploit efficient prompt selection for task identifiers and prompt fusion for knowledge transfer based on image-text supervision. Extensive experiments demonstrate the superiority of our approach, e.g., ModalPrompt achieves +20% performance gain on LMMs continual learning benchmarks with x1.42 inference speed refraining from growing training cost in proportion to the number of tasks. The code will be made publically available." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Continual learning", "Large multimodal models", "Efficient learning", "Prompt learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/f557d72e5e53556a321bc19c7bcf56100cd4753a.pdf" }, "presentation": null, "primary_area": { "value": "transfer learning, meta learning, and lifelong learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Dual-Modality Guided Prompt for Continual Learning of Large Multimodal Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
04c5uWq9SA
A False Sense of Privacy: Evaluating Textual Data Sanitization Beyond Surface-level Privacy Leakage
main
Active
Privacy;NLP;Text;Reidentification;Data Release;Sanitization;Anonymization
alignment, fairness, safety, privacy, and societal considerations
3;5;5;8
4;3;4;4
2;2;3;4
3;2;3;3
3;2;2;4
5.25
3.75
2.75
2.75
2.75
0.080845
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- Did you find major differences between the two datasets in terms of atomizing claims in documents? It seems to me that this would be more structured in a medical Q&A dataset, as compared to LLM-user interactions." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- Extremely well-written paper, with clear motivation and research questions.\n- The figures in the paper are very informative, and do an excellent job of conveying key information. The running examples were great for readability.\n- Clear problem statement, with adequate background and justification of design choices. Creative use of LLMs as an evaluation for text-coherence.\n- The results about access to different sets of auxiliary information were really interesting to me. The hypothesis about the non-uniformity of LLMs' instruction-following seems intuitive, but would be interesting to quantify this in its own right.\n- The human subject experiments were a nice touch - useful to know the capabilities of the two models in this context." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The manuscript seeks to highlight privacy concerns in text-sanitization techniques, by a) proposing a semantic-similarity based privacy metric for re-identification/matching attacks, and b) evaluating state-of-the-art defenses against such inference under the proposed metric. \n\nThe authors use a 2-step approach; in the first 'linking' step, sanitized documents are compared to externally known auxiliary information about a target individual using a TFIDF-based sparse retriever, and in the second 'semantic matching' step, a language model is used to assess similarity between atomic claims in the retrieved document and those in the original, unsanitized document.\n\nThe paper then evaluates several defense strategies to quantify information leakage under the above framework, and find that DP based methods may provide some of the strongest protections, albeit at the cost of data utility." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Can't think of any immediate flaws or weaknesses. Happy to discuss further once other reviews are in." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "L081, L099: Just to be sure: If I understood correctly, \"claims\" can refer to any sensitive or non-sensitive information in the original texts?\n\nL101: If (1) PII removal had 94% leakage (inferable claims), and (2) data synthesis (with or without DP?) has 9% lower leakage (i.e., 85% ?), why does (3) data synthesis without DP state 57% << 85% leakage?\n\nSection 2.3 Linking Method:\n- L146: Could you briefly motivate the use of the sparse BM25 retriever? Did you consider dense retrieval methods (say, using some form of text embeddings)? What are the benefits of BM25 vs. other sparse methods, or of sparse methods vs. dense methods, in particular for the linkage task at hand?\n- L147-148: You state your \"approach aggregates the auxiliary information into a single text chunk\" -- Does that mean you combine (concatenate?) _all_ \"atomized claims\" x^(i)_j across all j into the \"aux info\" {\\tilde x}^(i)? (Wouldn't hurt to write this down more explicitly.) (Just found the info in L296/Section 3.3 that the attacker gets 3 random claims for the linkage phase. I find that a bit late, it would be better to mention it directly in Section 2.3 to avoid confusion/guessing on the side of the reader.)\n\nSection 2.4 Similarity Metric:\n- L156: Can you give a specific reason for querying only with claims that were _not_ utilized in the linking phase? Besides, how do I know whether a claim about an original document was used for linking? If _all_ atomized claims are combined into the aux info (cf. 
previous question), and the aux info is used as query in the retriever, wouldn't this imply that all claims are already consumed in the linking phase?\n- L159: If I understood correctly, you define the \"similarity metric\" µ between the original and linked documents by querying a language model to judge the document similarity, where you assign values on a scale from 1 (for 'identical documents') to 3. I wonder if it would make more sense to start the scale at 0, since mathematically, a metric has the property of evaluating to 0 for identical inputs. (In your case, we would get \"µ(x,x) > 0\" instead of \"µ(x,x) = 0\".)\n- How do I know that the atomized claims, which are used to compute µ and hence to measure privacy preservation, are actually privacy-relevant, and not just some arbitrary, privacy-*in*sensitive facts?\n\nL313: I'm confused regarding the symbol \"µ\" seemingly used for multiple purposes. It is defined in Sec. 2.4, but here, you also use it for another metric induced by ROGUE-L scores.\n\nL317 (also L386): You state \"zero-shot prompting achieves an accuracy of 0.44\" for MedQA task utility, but why am I unable to find that result in Table 1 (for \"No Sanitization\", it says 0.69)?\n\nTable 1:\n- Calling the privacy metrics \"Overlap\" and \"Similarity\" is very confusing, since they actually mean the opposite (high lexical overlap and semantic similarity would indicate a high agreement between the two documents, but high scores in Table 1 mean good privacy). Name them lexical/semantic \"distance\" instead?\n- Talking about metrics: In Equation 1 you define a \"privacy metric\", I guess that is what is reported under the (why differently named?) \"Semantic Similarity\" column in Table 1. It is based on the \"similarity metric\" from Section 2.4, which has values between 1 and 3 -- How does it end up with values between 0 and 1 in Table 1?? I couldn't see any discussion on some form of normalization of these scores. 
The expected value of scores >= 1 in Eq. 1 would still result in a value >= 1, and not in [0,1). Please double-check how you actually compute the metrics. Try *not* to distribute information pertaining to one concept across the paper, but put it concisely into one place if possible. Also prefer consistent naming.\n\nTable 2:\n- The effect of \\epsilon appears surprisingly small to me, with only minimal changes across all metrics even when comparing \\epsilon=3 and 1024. Can you explain this behavior?\n- It would be interesting to compare with a random baseline where the utility is determined from completely random texts -- to rule out that 0.4x task utility in the case of MedQA can already be achieved based on completely random input (say, if the dataset suffers from strong class imbalance and the classifier always just guesses the largest class, thus obtaining an overly optimistic accuracy).\n\nTable 3:\n- L404: What exactly is the \"linkage rate\"? Please specify.\n- L423: Contradicting statements: Here, you state the last three claims are used, previously in L296, you mentioned 3 randomly selected claims.\n\nL443/Section 2.4: If you can switch the similarity metric µ also to ROGUE-L, please already state this as possible option in Section 2.4 where you introduce µ. Currently, you only say there that µ is determined by querying a language model.\n\nLastly, what are your thoughts on information that is both privacy-sensitive and utility-relevant, say, if one or more atomized claims are also strongly related to a downstream task? For instance, what if an atomized claim turns out to be \"John likes baseball\", and one of the WildChat categories is \"baseball\", too? Feel free to substitute \"baseball\" with something more delicate, such as \"drinking alcohol\".\n(Case A: If the baseball aspect is kept in the sanitized document, both µ and the chi^2 distance should be small, indicating poor privacy but good utility. 
Case B: If the baseball aspect was redacted, both µ and chi^2 should be larger, indicating better privacy but poorer utility.)\n\nAdditional considerations for related work:\n[1] also highlights the insufficiencies of superficial sanitization methods for text. [1] and also [3,4,5] propose differentially private methods that obfuscate texts. An evaluation framework for text rewriting has also been introduced previously [6].\n[2] has been published in parallel with (Yue et al., 2023) and also suggests differentially private synthetic text generation.\n- [1] Weggenmann & Kerschbaum, \"SynTF: Synthetic and Differentially Private Term Frequency Vectors for Privacy-Preserving Text Mining\", SIGIR 2018\n- [2] Mattern et al., \"Differentially Private Language Models for Secure Data Sharing\", EMNLP 2022\n- [3] Weggenmann et al. \"DP-VAE: Human-Readable Text Anonymization for Online Reviews with Differentially Private Variational Autoencoders\", WWW 2022\n- [4] Igamberdiev & Habernal, \"DP-BART for Privatized Text Rewriting under Local Differential Privacy\", ACL Findings 2023\n- [5] Bo et al., \"ER-AE: Differentially Private Text Generation for Authorship Anonymization\", NAACL 2019\n- [6] Igamberdiev et al. \"DP-Rewrite: Towards Reproducibility and Transparency in Differentially Private Text Rewriting\", COLING 2022" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Formalizing linkage attacks for unstructured text data is a nice and useful contribution, and enables a systematic evaluation of various (novel) text sanitization methods in the future.\n\nWhile not entirely new, cf. 
e.g., [1] and the already cited (Stadler et al., 2022), the observations that superficial sanitization methods (such as PII removal) are often insufficient to properly protect privacy remains important.\n\nFor most parts, the paper is well written and easy to follow. However, there are some uncertainties about metrics and inconsistencies between numbers reported in the texts and tables, which are (in my view) confusing to the reader and undermine the validity of the currently reported results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a framework to evaluate sanitization methods for releasing datasets with textual data. They highlight that obvious methods such as removing explicit identifiers like names is insufficient for properly protecting privacy, since other semantic details can also leak private information. Also, auxiliary information about an individual may be linkable to a supposedly sanitized target document, thus allowing an attacker to infer or recover further sensitive details.\n\nThe goal of the framework is the quantification of information leakage from sanitized documents given auxiliary information about the document's owner or author.\nThe framework proposes to determine auxiliary information from each original documents by extracting individual \"claims\". 
For each document, the attacker is given a subset of claims and runs a sparse retriever to find the best-matching document from the set of sanitized documents.\nThey then define a similarity metric, which is either determined by an LLM or the ROGUE-L score, to compute the similarity between the retrieved document and the remaining claims extracted from the original document.\nAdditionally, they define task-specific utility metrics for each evaluated dataset.\n\nIn the evaluation, the authors consider two datasets: MedQA from the medical domain with a question-answering task, as well as WildChat consisting of online conversations with ChatGPT and a text categorization task. They also consider a range of sanitization methods that either work by removing PII or by generating synthetic data, the latter also with the option of providing differential privacy.\nIn each scenario, the newly introduced semantic and lexical privacy metrics are computed, along with task-specific utility measures as well as the quality (coherence) of the sanitized texts. Lastly, they perform a human evaluation to determine which variant of the privacy metric best matches human preferences." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I stumbled across some inconsistencies between the numbers reported in the Tables and discussed in the text. Please double-check (cf. questions) and update, or explain the differences.\n\nSome details about the metrics and their computation remain unclear (cf. questions). 
Please try to use consistent naming and define concepts (such as the definition of metrics/distances) in one concise and consecutive piece of text (not spread across several sections).\n\nL323: I think the conclusion from a \"disparity between lexical and semantic similarity\" to \"these techniques primarily modify and paraphrase text without effectively disrupting the underlying connected features and attributes\" is made a bit prematurely: Both are entirely different measures, and even for \"no sanitization\", the lexical score is twice the semantic score. Also, what would happen if you shifted the (apart from the ordering: somewhat arbitrarily) assigned scores for the \"similarity metric\" in Section 2.4 from {1,2,3} to {0,1,2} or to {1,10,100}?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "N/A" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper focuses on the privacy leakage of text data. The authors design different prompts for LLM to evaluate the semantic similarity between two sentences, which is interesting. The experimental results are extensive. Multiple data sanitization techniques are included in the evaluation framework." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a privacy evaluation framework for data sanitization methods, specifically data anonymization and data synthesis, in the context of natural language. The framework can be summarized as follows: first, random records from the original data are sampled as the auxiliary data; then, an information retrieval technique is used to link the auxiliary data with the sanitized data, and an LLM is utilized to evaluate the semantic similarity between the original records and the linked records. The final similarity scores denote the degree of privacy leakage of the sanitized data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The main issue of this paper is the definition of privacy leakage, which the authors equate with semantic similarity between auxiliary data and sanitized data. However, the semantic information of a sentence reflects its utility. If the semantic content of a sanitized sentence is altered, the sanitization method would be useless. Traditional data anonymization methods aim to remove only identifiers from a data record rather than all information. In this context, identifiers should be the privacy focus, and privacy leakage should refer specifically to identifier leakage.\n\n2. The technical novelty is relatively limited. The linking step uses the existing BM25 retriever, while the semantic similarity evaluation mainly relies on established prompt engineering techniques.\n\n3. The findings are not particularly interesting, as it is well-known that simple data anonymization and data synthesis techniques are insufficient to protect data privacy. This paper's findings merely confirm that this limitation also applies to text data.\n\n4. The numerical results rely on LLM output, which is relatively qualitative and less persuasive. 
Additionally, querying LLaMA three times for consistency seems unnecessary; disabling sampling parameters in text generation should ensure consistent results from LLaMA for the same query." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to my weaknesses. Also, I have a few new questions.\n\n1) How can your method extend to other datasets? Is there any real auxiliary data that can be used instead of creating overlapped auxiliary data from the original records?\n\n2) Regarding the concept of privacy, is converting the age of 23 to early 20s a privacy breach? Such conversion is commonly adopted by K-anonymity." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well-written and easy to follow. All the included sanitization methods are up-to-date and well-explained.\n\n2. The proposed method is straightforward in decomposing the re-identification with linking and matching methods. \n\n3. Experimental results are comprehensive with sufficient ablation experiments. The included baselines are solid and up-to-date." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the current limitations of existing textual data sanitization methods. By considering re-identification attacks with known auxiliary information, the paper shows that a sparse retriever can link sanitized records with target individuals even though the PII patterns are anonymized. Instead, the paper proposes a new privacy evaluation framework for the release of sanitized textual datasets. The paper considers two datasets, including MedQA and WildChat, to show that seemingly innocuous auxiliary information can be used to deduce personal attributes like age or substance use history from the synthesized dataset. Experimental results also verify that current data sanitization methods create a false sense of privacy only on the surface level." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. My major concern is that the auxiliary data is highly contrived. Based on my understanding, each auxiliary sample is the subset of exact atomics from the target record. For example, in Fig 1, Auxiliary Information contains two atoms of the Original Record. That is, if you only consider de-identified, sanitized records, it is very easy for your BM25 retriever to get the sanitized target. In real-world re-identification attacks, there is no such auxiliary information that has many exact overlapped n-grams as original records. \n\n2. For the claim that 'private information can persist in sanitized records at a semantic level, even in synthetic data,' if you consider DP generation, the privacy level is indicated by $(\\epsilon, \\delta)$. That is, your linked record may not be the original target sample. DP introduces random noise to offer plausible deniability and protect the original record's privacy.\n\n3. 
The implemented methods for the proposed privacy evaluation framework only integrate various existing components by using the contrived auxiliary data. It is not likely to scale this framework for a large number of overlapped atoms." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Privacy evaluation for quantifying disclosure risks of sanitized dataset release beyond surface level, exposing false sense of privacy" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024a,\ntitle={A False Sense of Privacy: Evaluating Textual Data Sanitization Beyond Surface-level Privacy Leakage},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=04c5uWq9SA},\nnote={under review}\n}" }, "abstract": { "value": "The release of sensitive data often relies on synthetic data generation and Personally Identifiable Information~(PII) removal, with an inherent assumption that these techniques ensure privacy. However, the effectiveness of sanitization methods for text datasets has not been thoroughly evaluated. To address this critical gap, we propose the first privacy evaluation framework for the release of sanitized textual datasets. In our framework, a sparse retriever initially links sanitized records with target individuals based on known auxiliary information. Subsequently, semantic matching quantifies the extent of additional information that can be inferred about these individuals from the matched records. We apply our framework to two datasets: MedQA, containing medical records, and WildChat, comprising individual conversations with ChatGPT. 
Our results demonstrate that seemingly innocuous auxiliary information, such as specific speech patterns, can be used to deduce personal attributes like age or substance use history from the synthesized dataset.\nWe show that private information can persist in sanitized records at a semantic level, even in synthetic data. Our findings highlight that current data sanitization methods create a false sense of privacy by making only surface-level textual manipulations. This underscores the urgent need for more robust protection methods that address semantic-level information leakage." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Privacy", "NLP", "Text", "Reidentification", "Data Release", "Sanitization", "Anonymization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b617ee9754da2d62065cb9dc97b3024b9e28590d.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "A False Sense of Privacy: Evaluating Textual Data Sanitization Beyond Surface-level Privacy Leakage" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
04qx93Viwj
Holistically Evaluating the Environmental Impact of Creating Language Models
main
Active
machine learning;artificial intelligence;language model;large language models;environmental impact;carbon emissions;water usage
alignment, fairness, safety, privacy, and societal considerations
3;6;8
5;4;3
2;4;3
2;4;3
3;3;3
5.666667
4
3
3
3
-0.993399
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I would think similar studies are done for other fields such as electric vehicles. Are there better regulations for reporting in those? What are some other parallels and what other gaps can we find in transparency compared to them?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper raises very important concerns about transparency in the area of energy and water consumption required for developing as well as using LLMs.\n2. This paper includes aspects of this process which have not been reported before such as hyper-parameter tuning and manufacturing of GPUs.\n3. The discussion around these calculations is useful for others to understand the environmental implications of doing AI research." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper provides estimates and insights into power and water consumption during training and inference of Large Language Models (LLMs). Many of these estimates are based on real experiments in training these models of different parameter sizes. Some are rough estimates for cases where experiments were not possible, such as GPU manufacturing. 
The paper highlights that there are side activities such as data center cooling, development of the model etc. which are not accounted for in the literature reporting carbon emissions from model training." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While some of the calculations are clear and seem reproducible as long as some of the manufacturer specific quantities are known, I am not 100% certain if all the steps can be followed by others. It would be useful if the authors can confirm whether someone can apply their methodology for similar experiments/calculations and if the paper contains all the details needed to do so.\n2. It would be helpful if the `Development` part of Section 4.1 can provide more details of what it covers i.e. what kind of HPO, what was the search space, what scaling experiments etc.\n\nMinor:\nmore transparency from developers on when, where, and how they a;re training their models -> more transparency from developers on when, where, and how they are training their models" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I would like the authors to address W1 and W2, whether they agree or not, and if they do, how they plan to address them." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "S1: This paper provides the first-ever comprehensive end-to-end view of the environmental impact of an LLM. It is highly valuable to have this data published, as we, as a research community, need to have a complete view of how current training and deployment practices affect CO2 production and water usage. The insights and data provided can be used as a building block to argue future performance improvements not only for $ cost reasons but also to quantify their environmental impact. \n\nS2: The authors take care to question current naive assumptions like a 100% power draw during training, making this paper stand out. While this is a low bar, research on environmental impact in LLM training has had its share of invalidated research due to these minor, overlooked details.\n\nS3: The authors estimate the water usage during the development of an LLM, which I have not seen before in this line of research. This adds a new dimension to the environmental impact, providing a more complete picture of how current AI practices affect our environment." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper estimates the real-world environmental impact of training LLMs, including hardware manufacturing, model development, final training runs, and deployment. They vary model sizes, model architectures, and training time, providing a first view of the embodied CO2 production and water usage from AI systems at these scales. The entire study results in a production of 270t CO2 and a usage of 1.1M liters of water.\n\nAdditionally, it finds that model development accounts for 80% of the environmental impact, which is often not stated in related work. 
Additionally, they find that power draw has highs and lows due to checkpointing, creating a mixed load on the electrical grid, which may not easily be addressed, resulting in control challenges for power providers." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "W1: The result of model development being a large chunk of the environmental impact is not too surprising, but I agree that it is important to track and present in this paper. I am wondering about the representativeness of the data presented in this paper for model development and whether we will see a similar trend continue in the future. Given that this is a key contribution outlined in the abstract, I question whether the number of 80% will change significantly in future related work and if there are steps to take to present this more confidently. I am afraid that researchers in related fields take the final training costs and multiply them by 5x due to the results in this paper.\n\nW2: The second point of discussion has a similar issue as W1. While I agree that oscillating power draw may be a problem for power providers, I hesitate to agree that this is an issue at large. GPU-memory checkpointing has been shown to be possible by Gemini (https://arxiv.org/pdf/2312.11805), which likely reduces this time to sub-seconds. I am not against keeping this insight in general, and that power draw may be an issue for future techniques, but I question the future-proofness of this discussion point. Also, this being a problem for power providers could be explained in more detail and what this implies for the environmental impact.\n\nW3 (minor issues):\n* The EU AI Act could be included in 5.1 as it also includes the environmental impact of AI systems (e.g. Art 95 https://artificialintelligenceact.eu/article/95/)\n* Figure 1 takes a lot of space for the amount of information it provides. 
A double y-axis may not be the best visualization as it makes it harder to initially grasp the information. Maybe using two scatter plots would make the visualization more compact and easier to understand?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In table 2, why is OLMo reported twice is there a difference?\n2. I am curious to know how much time it took to train your models. Given the hardware resources you had (up to 64 HGX servers with h100), why was your study limited to 7 billion parameter models?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The discussion of energy consumption and costs incurred by training and deploying LLMs is a rising and highly relevant topic.\nThis paper has the following strengths:\n\n1. It is one of the first studies to report the environmental impact of model development, not just the final training runs, highlighting the significant but often overlooked costs associated with hyperparameter tuning and other development activities.\n2. Cost evaluation encompasses power consumption, carbon emissions, and water usage and is not confined to a single aspect.\n3. Reporting the costs and putting them into perspective with real-world equivalencies." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper discusses the impact of large language models on the environment by studying the costs associated with training and deploying them. It explores the hidden cost of training a model, particularly when it comes to hardware manufacturing and the pre-training steps of creating a model, along with the training and deploying costs. They run their experiments on a small set of models with parameter sizes ranging between 20 million and 7 billion parameters. The results show that models released 270 metric tons of carbon emissions and 1.137 million liters of water. Additionally, the author discusses the power fluctuation during training that is a result of model checkpointing." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Even though the topic is highly interesting he are some limitations to the paper:\n1. Lack of novelty: The issue of power consumption in LLMs has been widely studied, and this paper doesn't provide any additional ideas, metrics, or insights expect for the study development cost.\n2. The findings are based on a specific set of small models, which may limit the generalizability of the results to other models and data centers with different configurations and efficiencies\n3. The study does not include data from actual deployment and usage of the models, relying instead on simulated scenarios, which may not fully reflect the actual environmental costs. In fact, the paper has a limited set of inference simulations with very simplistic assumptions, which may not fully capture the real-world deployment scenarios and their environmental impacts. \n4. Some of the calculations rely on assumptions and estimates, particularly regarding the embodied emissions and water consumption of hardware manufacturing, which may not be entirely accurate.\n5. 
Limited comparison across models: The authors seem to have taken the carbon consumption of llama and OLMo in Table 2 from previous works without replicating results, which meant no water usage comparison for training. For deployment, they only compare with Llama.\n6. Given the small sizes of the models, the paper lacks an analysis of how their results scale to larger models." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Measuring the environmental impact, including carbon emissions and water usage, from training a series of language models." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024holistically,\ntitle={Holistically Evaluating the Environmental Impact of Creating Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=04qx93Viwj},\nnote={under review}\n}" }, "abstract": { "value": "As the performance of artificial intelligence systems has dramatically increased, so too has the environmental impact of creating these systems. While many model developers release estimates of the power consumption and carbon emissions from the final training runs for their latest models, there is comparatively little transparency into the impact of model development, hardware manufacturing, and total water usage throughout. In this work, we estimate the real-world environmental impact of developing a series of language models, ranging from 20 million to 7 billion active parameters, trained on up to 5 trillion tokens each. 
When accounting for hardware manufacturing, model development, and our final training runs, we find that our series of models released $\\textbf{270 metric tons}$ of carbon emissions, equivalent to powering about 53 homes in the United States for one year, and consumed $\\textbf{1.137 million liters of water}$, equivalent to about 10 years of water usage by a person in the United States, even though our data center is extremely water-efficient. We measure and report the environmental impact of our model development; to the best of our knowledge we are the first to do so for LLMs, and we find that model development, the impact of which is generally not disclosed by most model developers, amounted to $\\sim$$\\textbf{80}$% of that of training. By looking at detailed time series data for power consumption, we also find that power usage throughout training is not consistent, fluctuating between $\\sim$15% and $\\sim$85% of our hardware's maximum power draw, with negative implications for grid-scale planning as demand continues to grow. We close with a discussion on the continued difficulty of estimating the environmental impact of AI systems, and key takeaways for model developers and the public at large." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "machine learning", "artificial intelligence", "language model", "large language models", "environmental impact", "carbon emissions", "water usage" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/09f57c232e7b56840cd95b200d0e74eeea7e5db9.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Holistically Evaluating the Environmental Impact of Creating Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
063FuFYQQd
LLaVA-Surg: Towards Multimodal Surgical Assistant via Structured Lecture Learning
main
Active
Multimodal assistant;surgical;multimodal instruction-following data;dataset
foundation or frontier models, including LLMs
3;5;5;6;6
5;4;4;4;4
2;3;2;4;4
3;2;2;3;3
2;3;3;4;3
5
4.2
3
2.6
3
-0.912871
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Does splitting video into frames for CLIP’s visual encoder lead to a loss of spatiotemporal information, and wouldn’t a video encoder like Video Swin Transformer [2] better capture temporal dynamics?\n[2] Liu, Z., Ning, J., Cao, Y., Wei, Y., Zhang, Z., Lin, S., & Hu, H. (2022). Video swin transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (pp. 3202-3211).\n2. How does LLaVA-Surg perform compared to other state-of-the-art multimodal methods? In addition to general multimodal models, a detailed comparison with models like ReAct would provide a more comprehensive evaluation. Has comparison with other two-stage methods [3] in VQA task been overlooked?\n[3] Gai, X., Zhou, C., Liu, J., Feng, Y., Wu, J., & Liu, Z. (2024). MedThink: Explaining Medical Visual Question Answering via Multimodal Decision-Making Rationale. arXiv preprint arXiv:2404.12372.\n3. Is the two-stage question-answer generation process applicable to other medical fields, and if so, what adjustments would be required? Additionally, validating the method’s performance on public datasets like RAD, SLAKE, and PathVQA would strengthen its generalizability." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tThe authors provide a novel dataset, Surg-QA, which is a significant resource for training multimodal surgical models, covering diverse surgical procedures and question-answer pairs.\n2.\tThe two-stage pipeline for question-answer generation mitigates hallucinations in LLM outputs, resulting in higher quality and reliability of generated data.\n3.\tLLaVA-Surg demonstrates notable improvements over general multimodal models in zero-shot surgical video question-answering tasks, showcasing its efficacy in understanding surgical context." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces LLaVA-Surg, a multimodal large language model designed as a conversational assistant for surgical applications. To support this, the authors developed Surg-QA, a large-scale dataset containing 102,000 surgical video-instruction pairs, generated through a structured two-stage question-answer pipeline. This pipeline helps extract structured knowledge from surgical lecture videos, enabling the LLaVA-Surg model to understand complex surgical procedures and answer open-ended questions in a zero-shot setting. The model leverages CLIP for visual encoding and is fine-tuned on Surg-QA to specialize in surgical video question-answering, achieving superior performance compared to existing general-domain models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper should compare its model with recent multimodal LLM approaches, specifically ReAct (Yao et al., 2023), which combines reasoning and action for complex tasks.\n[1] Yao, S., Zhao, J., Yu, D., Du, N., Shafran, I., Narasimhan, K., & Cao, Y. (2023, January). 
ReAct: Synergizing Reasoning and Acting in Language Models. In International Conference on Learning Representations (ICLR).\n2. Using CLIP for frame-by-frame encoding lacks temporal modeling and increases processing costs and redundancy, burdening the LLM as frame count grows.\n3. The paper lacks an in-depth error analysis, especially regarding potential hallucinations or misunderstandings in complex surgical scenarios. Although the authors claim to reduce hallucinations, achieving perfect performance seems challenging.\n4. The model’s adaptability to other medical or clinical fields is unclear, as broader evaluations on datasets like RAD, SLAKE, and PathVQA are missing, which may limit its wider applicability." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "I am wondering about the WebSurg's policies on using their videos to train deep learning models, but I could not find any information about this in their terms of use." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The paper is very well written and addresses its objectives. It also supports its claims and provides adequate experiments. Therefore, I am leaning toward accepting this paper, but I have some minor concerns regarding the legality of using WebSurg's surgical videos. I also have some questions:\n1. The authors mention that the model is limited by hallucinations, which is a serious concern for a surgical chatbot. 
Could you please provide more details, and types of hallucinations, and give some examples?\n2. Would it be possible to evaluate LLaVA-Surg on the SSG-VQA dataset? I am interested in knowing more about the breadth of your dataset and if it contains enough information for cross-dataset generalization." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- **Clarity**: The paper is well-written and easy to follow. \n- **Contributions**: This work makes a significant contribution to the development of surgical chat assistants. The dataset contains a wider range of surgical QAs compared to previous works. The proposed model and dataset may be valuable resources for researchers in this area." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces LLaVA-Surg, a multimodal conversational assistant based on surgical videos. Additionally, they introduce a new dataset with 102,000 question-answer pairs for training multimodal LLMs. The authors provide details of their data generation procedure, which is carefully designed to avoid hallucinations. The paper provides detailed comparisons with existing general-purpose and surgical-purpose datasets. Lastly, the authors provide a human and LLM evaluation of the dataset, showing consistent scores." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **Dataset Availability**: The surgical videos are available on WebSurg and are not a contribution of the authors. Therefore, the data availability may be subject to license changes from the content owners and WebSurg.\n- **Hallucinations and Data Quality**: As the authors mentioned, there may be hallucinations in the dataset, since it is automatically generated. 
The authors provide chatGPT and human evaluations, but that is not enough to infer the data quality.\n- **Model Availability**: It is not possible to reproduce the results since the model is not available yet, but enough details are provided to support the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "Since doctors are hired to do the annotation, have the possible ethical risks been resolved? For example, IRB approval, etc." }, "flag_for_ethics_review": { "value": [ "Yes, Responsible research practice (e.g., human subjects, data release)" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Improvement of the methodology.\n- Detailed Comparison." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The Surg-QA dataset, along with the two-stage pipeline, is a significant contribution to medical AI. \n- LLaVA-Surg’s ability to process and interpret surgical video content sets it apart from other models focused primarily on static images.\n- The language is clearly presented. The authors use precise and concise language so that the reader can easily understand the dataset, methodology, and results of the study." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel vision-language model, LLaVA-Surg, designed to assist in surgical settings. 
Leveraging the newly created Surg-QA dataset with 102K surgical video-instruction pairs, the model provides conversational, open-ended responses to questions about surgical procedures. Evaluations demonstrate LLaVA-Surg’s superior performance in surgical video question-answering, indicating its potential as a reliable tool in surgery-related applications." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Although the dataset is valuable, this storyline and methodology is too similar with LLaVA-Med [1]. Maybe the authors could think of improvements of this simple fine-tuning method (i.e., SFT) to make better use of this dataset.\n- The paper lacks comparative results. The current comparative models are rarely trained on surgical scene data, which is unfair. It is necessary to compare with a specific model.\n- Since doctors are hired to do the annotation, have the possible ethical risks been resolved? For example, IRB approval, etc.\n\n[1] Li C, Wong C, Zhang S, et al. Llava-med: Training a large language-and-vision assistant for biomedicine in one day[J]. Advances in Neural Information Processing Systems, 2023." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "Potential copyright problem for online data." }, "flag_for_ethics_review": { "value": [ "Yes, Legal compliance (e.g., GDPR, copyright, terms of use)" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How can the quality of the data be ensured? 
The data collected may already contain a lot of noise and has been reprocessed by an LLM. Is there any person or clinician reviewing these raw data?\n2. Can the data be released? Are there privacy and permission risks associated with the collected data?\n3. The authors need to conduct more zero-shot evaluations on downstream tasks relevant to the surgical field, such as phase recognition, action/instrument classification, and other surgical domain VQA data to demonstrate the clinical usability of their method.\n4. The authors need to compare with more state-of-the-art methods. The comparison methods in Table 3 were all first released in 2023.\n5. The authors may verify their dataset on more benchmarks of SOTA Video MLLM architectures.\n6. Also, the authors need more zero-shot comparisons with the same VLM trained on other surgical datasets, to showcase the generalizability of their proposed dataset.\n7. The authors may evaluate the visual quality of the surgical videos themselves, as they are obtained from the website." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. With over 102,000 video-instruction pairs, this dataset is the largest in the surgical field.\n2. Structured data annotation pipeline using LLMs minimizes the risk of generating inaccurate or nonsensical content, improving dataset reliability.\n3. Releasing the dataset, model, and code publicly fosters further research and development in the surgical AI domain.\n4. The dataset can be a valuable resource for training and education, helping surgical trainees learn through interactive Q&A about real procedures." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel surgical multimodal dataset, which consists of over 102,000 video-instruction pairs generated through a two-stage pipeline, aimed at enhancing the understanding and conversational capabilities of surgical videos." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper does not address how the data's quality is maintained as the videos are obtained from the web. The clinicians have reviewed the output of their MLLM model, but the paper does not confirm whether clinicians or domain experts have reviewed the raw data to ensure accuracy and reliability.\n2. Concerns regarding the release, privacy, and permission risks associated with using sensitive surgical videos are not adequately discussed.\n3. The paper lacks comprehensive validation across essential surgical downstream tasks and other surgical QA datasets, which are crucial for demonstrating clinical usability. There is also a need for more rigorous benchmarking against a broader range of state-of-the-art video MLLM architectures to establish the dataset's utility and the model's performance more robustly.\n4. The comparison of the proposed methods with SOTA methods is limited and does not include the latest works. The manuscript also lacks evaluations with models trained on other surgical datasets, limiting the assessment of the proposed model's generalizability across different surgical scenarios.\n5. The paper may need to evaluate the visual quality of the surgical videos." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Please address the weaknesses mentioned above." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. The pipeline is comprehensive: A two-stage question-answer generation process minimizes hallucinations by extracting information prior to generating pairs, which enhances data quality and reliability compared to Quilt-1M[1], which has a similar approach.\n\n2. Integrating surgical visual concept alignment data through action triplets improves text-visual alignment, enhancing the model’s grasp of surgical concepts.\n\n3. The idea is interesting: using the Spearman rank correlation between expert and GPT scores effectively validates the reliability of large-scale GPT evaluation.\n\n[1] Ikezogwo, Wisdom, et al. \"Quilt-1m: One million image-text pairs for histopathology.\" Advances in neural information processing systems 36 (2024)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces LLaVA-Surg, the first multimodal surgical assistant capable of understanding surgical videos and engaging in open-ended conversations about them. 
The authors create Surg-QA, a dataset of 102,000 surgical video-instruction pairs, using a novel two-stage question-answer generation pipeline. This approach reduces LLM hallucinations and costs by breaking down the generation process. The resulting model demonstrates superior performance in surgical video question-answering compared to previous general-domain models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Could you provide results for the three existing surgical-domain datasets (EndoVis-18-VQA, Cholec80-VQA, and SSG-VQA) trained on Surg-QA? These results could demonstrate Surg-QA's potential as a foundational dataset in the surgical domain.\n\n2. Maybe considering to use other video VLM models, which provides a more sophisticated approach to temporal fusion than simple average pooling." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024llavasurg,\ntitle={{LL}a{VA}-Surg: Towards Multimodal Surgical Assistant via Structured Lecture Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=063FuFYQQd},\nnote={under review}\n}" }, "abstract": { "value": "Multimodal large language models (LLMs) have achieved notable success across various domains, while research in the medical field has largely focused on unimodal images. Meanwhile, current general-domain multimodal models for videos still lack the capabilities to understand and engage in conversations about surgical videos. One major contributing factor is the absence of datasets in the surgical field. In this paper, we create a new dataset, Surg-QA, consisting of 102,000 surgical video-instruction pairs, the largest of its kind so far. 
To build such a dataset, we propose a novel two-stage question-answer generation pipeline with LLM to learn surgical knowledge in a structured manner from the publicly available surgical lecture videos. The pipeline breaks down the generation process into two stages to significantly reduce the task complexity, allowing us to use a more affordable, locally deployed open-source LLM than the premium paid LLM services. It also mitigates the risk of LLM hallucinations during question-answer generation, thereby enhancing the overall quality of the generated data. We further train LLaVA-Surg, a novel vision-language conversational assistant capable of answering open-ended questions about surgical videos, on this Surg-QA dataset, and conduct comprehensive evaluations on zero-shot surgical video question-answering tasks. We show that LLaVA-Surg significantly outperforms all previous general-domain models, demonstrating exceptional multimodal conversational skills in answering open-ended questions about surgical videos. We will release our code, model, and the instruction-tuning dataset." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Multimodal assistant", "surgical", "multimodal instruction-following data", "dataset" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/04d73daf100581d96e3a971dd358d0aad68ebdd1.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "LLaVA-Surg: Towards Multimodal Surgical Assistant via Structured Lecture Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
06B23UkNid
MV-CLAM: Multi-View Molecular Interpretation with Cross-Modal Projection via Language Model
main
Active
Molecule captioning;large language models;drug discovery;molecule representation learning
applications to computer vision, audio, language, and other modalities
3;3;5;5
4;4;3;4
2;1;3;3
2;2;2;2
2;1;2;3
4
3.75
2.25
2
2
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. 3D structures (conformers)\n\nAs mentioned in sec. 5.1 you use MMFF for molecular conformation generation.\n\na. Is it ETKDG geometry generation with further MMFF optimization?\nb. Since it is possible to generate several different conformers for a single molecular structure, did you assess the dependence of the model quality on the conformations? Is it necessary to optimize a generated with ETKDG conformer with MMFF?\n\n2. It would be reasonable to compare your approach for Zero-shot editing with conditional generation models for small molecules.\n\n3. Please, add experiments on the CHEBI-20 benchmark for the captioning task." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "New molecular multimodal LLM framework for simultaneous incorporation of 1d 2D and 3D representations.\nNew Transformer architecture MQ-Former." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The work proposes a novel multimodal LLM framework MV-CLAM for organic chemistry and MQ-Former — multi-querying transformer model for simultaneous 1D, 2D, and 3D molecular representation learning. Authors show SOTA results in two tasks of molecule-text retrieval and molecule captioning. 
In addition, authors claim that their approach allows zero-shot molecule editing and molecule-related question answering." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The claim of the state-of-the-art performance for molecule captioning is not satisfied, see the results in [6].\nThere is no comparison with the other strong retrieval methods for the molecule retrieval task, i.e. RAG.\nThere are various problems with the Zero-shot editing part of the paper. The task is not formally defined. There are no metrics nor baselines for it.\n\nThe QA part is practically absent in the paper, while claimed in the abstract and results parts..\nThere are many works on molecular conformation generation [1-4], it seems that SMILES and/or 2D-graph representation is enough for neural networks to reconstruct RDKIT conformations almost perfectly. It means that 3D input possibly does not add any new information to the model. There is no comparison of the 1D+2D+3D MQ-Former vs 1D+2D models in the paper.\n\nThere is no comparison with other works on multi-modal representation learning for molecules, e.g.: [5]. \n\n[1] Zhu, Jinhua, et al. \"Direct Molecular Conformation Generation.\"\n[2] Xu, Minkai, et al. \"GeoDiff: A Geometric Diffusion Model for Molecular Conformation Generation.\" International Conference on Learning Representations.\n[3] Jing, Bowen, et al. \"Torsional diffusion for molecular conformer generation.\" Advances in Neural Information Processing Systems 35 (2022): 24240-24253.\n[4] Lee, Danyeong, et al. \"Disco: Diffusion Schrödinger bridge for molecular conformer optimization.\" Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 38. No. 12. 2024.\n[5] Manolache, Andrei, Dragos Tantaru, and Mathias Niepert. \"MolMix: A Simple Yet Effective Baseline for Multimodal Molecular Representation Learning.\" arXiv preprint arXiv:2410.07981 (2024).\n[6] Liu, Zhiyuan, et al. 
\"ReactXT: Understanding Molecular\" Reaction-ship\" via Reaction-Contextualized Molecule-Text Pretraining.\" arXiv preprint arXiv:2405.14225 (2024)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See 'Weaknesses' section.\n1. Could the authors provide a more detailed explanation of the novelty of MV-CLAM compared to recent related work?\n2. Why was SELFIES not considered as a molecular modality in this work, given its advantages over SMILES in tokenization and alignment with LLMs?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper integrates both 2D and 3D molecular structures to enhance the model's understanding of molecular data.\n2. The paper includes detailed figures (Figure 1-3) that clearly explain the method's framework and training scheme. \n3. And the analysis of attention maps in Appendix A.4 provides valuable insights into the model's behavior." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a framework that leverages large language models (LLMs) to interpret and generate molecular captions. The work incorporates both 2D and 3D molecular structures to provide a more comprehensive understanding of molecules." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Compared to recent related work, such as 3D-MoLM (Li et al., 2024), the innovation in MV-CLAM appears incremental. While the paper claims to incorporate both 2D and 3D molecular structures for a more comprehensive understanding, the approach seems to merely extend the 3D-MoLM framework by introducing 2D components through MAT. The proposed MQ-former architecture does not demonstrate significant structural innovations beyond existing methods. A clearer articulation of the novel contributions and architectural advantages over 3D-MoLM would be necessary to establish the work's originality.\n2. The paper considers SMILES as an important molecular modality and notes that \"1D SMILES provide compact represen tation of molecular structures\", but does not mention SELFIES (Krenn et al., 2020) at all, which has been widely adopted in recent works due to its robust characteristics and tokenization-friendly nature. SELFIES offers inherent robustness and easier tokenization that aligns well with LLMs, making it a potentially more suitable choice for this application. \n3. Some images (e.g. the big image at page 18) are not vector graphics and lack titles or captions, which makes it confusing." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Add experimental comparison against more chemical language models on molecule captioning, e.g., nach0 [1], Text+Chem T5 [2], SciFive [3], PRESTO [4], GitMol [5].\n* For retrieval task (Table 1), is it possible to add chemical BERT-based encoders in addition to textual encoder SciBERT? (e.g., ChemBERTa)\n* Conduct additional experiments on other molecule captioning datasets such as Mol-Instructions [6] and CheBI20 [7].\n* For molecule-text retrieval, do you adopt a generative approach (e.g., GENRE [8]) or the task is formulated as a cross-modal embedding-based search by similarity (e.g., as in [9])?\n* In Figure 3, where does the textual description come from during prediction on a test set? As far as I understand the molecule captioning task, you are only given a SMILES string.\n* What is the LLaMA version you use? Add adopted HuggingFace checkpoints. \n* Even if you adopt a LLaMA with 7B parameters, MolT5 has less than 1B. Could not we just scale MolT5 to 3-5B parameters and obtain a better molecule captioning quality?\n* Why is MolT5 absent from the Table 1?\n* Add ablation study for SciBERT, 2D/3D molecule encoders, LLaMA2.\n* Add ablation study for training losses. For Molecule-text Contrasting loss, prove it requires two components. For Molecule-text Matching loss, explore the effect of negative samples.\n* Is it possible to generalize the methodology to unseen datasets and unseen SMILES? Given a SMILES, can I always obtain its 2D/3D representation and apply a pre-trained MV-CLAM model?\n\n\n\n\nTypos:\n* Line 102: transformer -> Transformer, Add reference.\n* Line 194: **$A$** under-specified.\n* Line 234: Missing citation for LoRA." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The description of the proposed methodology is easy to follow. The paper is well written in general.\n* The paper introduces a promising multi-view for approach for the infusion of specialized chemical knowledge into general-purpose pre-trained LLMs.\n* The proposed MV-CLAM achieves state-of-the-art on PubChem324K for molecule captioning and retrieval tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces MV-CLAM, a framework utilizing a novel multi-querying transformer (MQ-Former) to enhance the alignment of multi-modal molecular representations with text. By employing a shared self-attention layer, this approach effectively consolidates 2D and 3D molecular data into query tokens, improving performance in molecule-text retrieval and captioning tasks. Additionally, it demonstrates potential for zero-shot molecule editing and molecule-related question answering, thereby facilitating better characterization of chemical structures." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The experimental evaluation of the proposed method is conducted on a single dataset for both task: molecule captioning and molecule-text retrieval.\n* The list of baseline models on molecule captioning only includes a single T5 language model while there are more recent works, including: nach0 and Text+ChemT5. \n* Some implementation decisions are not justified well enough. This includes: (i) the choice of SciBERT as a language encoder for MQ-Former; (ii) the choice of 2D and 3D encoders; (iii) introduction of $K$ query tokens instead of a single query token for each view; (iv) the choice of LLaMA2 as an LLM. 
It is unclear how the experimental results would change if each of the mentioned models is replaced with another one.\n* Incomplete ablation study. The necessity of (i) Molecule-text Contrasting and (ii) Molecule-text Matching losses is not proven experimentally. For (i), it is unclear whether two loss components required or the model will perform well with a single one. For (ii), the impact of negative sample is under-explored. \n* The effect of most hyper-parameters in the method's module on the resulting performance is understudied. For instance, query token count, negative sample count in MTM loss.\n* The methodology for molecule-text retrieval is unclear from the paper.\n* The applicability of the proposed methodology to broader list of datasets is questionable: it requires 2D/3D molecular data in addition to simple SMILES string representations." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- How does MQ-Former handle scenarios where 2D and 3D molecular information may not equally contribute to textual descriptions?\n- Could the authors include more molecular tasks, such as molecule generation or property prediction, to provide a more comprehensive evaluation of MQ-Former?\n- What impact does the weighting of the multi-objective training loss have on the model’s performance?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper aims to enhance cross-modal alignment by integrating 2D and 3D molecular views.\n- The model demonstrates improvements in molecule-text retrieval and captioning performance over baseline models.\n- The paper includes case studies and examples of zero-shot molecule editing." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes MQ-Former, an extension of the Q-Former framework, which incorporates a multi-query mechanism for aligning both 2D and 3D molecular data with textual information for enhanced molecule-text retrieval and molecule captioning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The model lacks significant innovation, as MQ-Former primarily adds an extra branch to the existing Q-Former with only minor variations in training objectives.\n- Experiments are restricted to molecule-text retrieval and captioning on PubChem. The paper lacks essential molecular tasks like molecule generation and datasets like ChEBI-20.\n- The motivation for adding a branch to Q-Former, rather than simply using a 3D molecular encoder like prior works (e.g., 3D-MoLM), is unclear. \n- The paper’s presentation could be improved. Plots lack careful formatting, with text that is difficult to read due to small font sizes." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "MV-CLAM introduces MQ-Former, a model that aligns 2D and 3D molecular representations with text via a novel cross-modal projector, improving tasks like molecule-text retrieval, captioning, and question answering." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024mvclam,\ntitle={{MV}-{CLAM}: Multi-View Molecular Interpretation with Cross-Modal Projection via Language Model},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=06B23UkNid},\nnote={under review}\n}" }, "abstract": { "value": "Large language models (LLMs) have shown significant potential in the biomolecular domain, particularly by demonstrating that effective adaptation of molecular representations for LLMs can greatly improve the quality of molecular captions. Most previous works have focused on aligning unimodal molecular structures with text, overlooking the diversity of modalities. Naive approaches to aligning multi-modal molecular structures with text often lead to (1) separately aligned embeddings, (2) inconsistent textual representations, and (3) increased computational overhead. To address these challenges, we propose LLM framework MV-CLAM equipped with MQ-Former, a novel multi-querying transformer. This architecture introduces a cross-model projector facilitating the simultaneous alignment of 2D and 3D molecular representations to a unified text token. By employing a shared self-attention layer, MQ-Former preserves rich molecular embeddings across different dimensions while consolidating them into a universal molecular token. Our approach outperforms baseline models in both molecule-text retrieval and molecule captioning tasks. Additionally, our framework shows promising results for zero-shot molecule editing and molecule-related question answering. By effectively integrating multi-view molecular data into a format conducive to LLMs, our method serves as a valuable tool for enhancing the characterization and understanding of chemical structures, facilitating a more seamless transition from molecular data to textual descriptions. 
The source code of MV-CLAM is available in https://anonymous.4open.science/r/mv-clam-4827." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Molecule captioning", "large language models", "drug discovery", "molecule representation learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/96c1427eb118eda7038e3ca6308c3231d9a8911e.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "MV-CLAM: Multi-View Molecular Interpretation with Cross-Modal Projection via Language Model" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
06GH83hDIv
Auction-Based Regulation for Artificial Intelligence
main
Active
Regulation;Mechanisms;Auctions;Artificial Intelligence
alignment, fairness, safety, privacy, and societal considerations
3;5;5;8
3;4;3;3
2;2;3;4
2;2;2;3
3;2;4;3
5.25
3.25
2.75
2.25
3
-0.080845
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* What is the rationale of choosing the Beta and Uniform distribution (beyond what is described in line 323-324). Are there any related works that you could cite to support this choice of distributions?\n\n* What is the scaling of complexity and cost (such as evaluation and communication) as the number of the agents increase? Are there any risks of agents colluding to achieve a suboptimal safety level?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper is well-written and well-supported by both theoretical proofs and empirical results. It addresses the important area of AI regulatory via a multi-agent economic, game-theory type framework. There are a few assumptions to simplify the mechanism but they appear to be acceptable/realistic such as i) the regulator and the participating agents use data from the same distribution to evaluate and submit the safety level, and ii) safer models cost more to develop. The paper has help enhance the current AI regulatory work with a well-formulated framework and has a potential to have some significance in this domain." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel framework of auction-based regulatory mechanism as an asymmetric and incomplete all-pay auction. The mechanism is described mathematically and also shows good empirical results of enhancing safety and participation rates. The framework consists of a regulator and multiple participating agents. Overall, this is an interesting framework with good potentials to explore and create safer and more robust AI regulatory." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "There are no obvious weaknesses to note. While the paper is well-supported in the mathematical formulation and proofs, it perhaps could have provided more evidence on the experiments and empirical data. More description of how this framework can be applied in AI regulatory or in practice might help ground it further and make it relevant to a wider group of audiences." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "**Q1.** Is there a reasonable mechanism for estimating the market value ($v_i^d$) of a model before it is submitted to the regulator or even before the training phase begins?\n\n**Q2.** Considering that SIRA’s performance deteriorates at high safety thresholds, would a simple increase in the threshold serve as a better incentive in such cases, as it may more directly encourage safer model development?\n\n**Q3.** The authors mention that safety evaluations rely on IID assumptions for both agent and regulator data. How would the proposed mechanism adapt to non-IID settings, where the agent's training data might be maliciously poisoned, or where the regulator's evaluation data is collected through other means?\n\n**Q4.** Is the random comparison fair for all competitive agents? For example, if we have utility values such that $u_A > u_B > u_C > u_D$, and A and B are grouped together while C and D are grouped together, then B and D cannot receive the policy bonus. However, since $u_B > u_C$, this situation could be considered unfair to B." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "**Originality.** The approach presents a unique use of all-pay auction mechanisms in AI regulation, where each agent's utility is linked to model safety levels (training cost), model value (market returns), and premium (policy compensation), creating an incentive for improved safety compliance.\n\n**Quality.** The paper theoretically derives Nash Equilibria to back the proposed incentive structure, demonstrating that agents' rational behavior leads them to exceed the minimum safety threshold. The experimental results align with the theoretical model.\n\n**Clarity.** This paper is well-written and easy to follow. The authors provide clear descriptions of the auction-based model and detailed steps in the algorithmic design of SIRA, supported by both theoretical and empirical validation.\n\n**Significance.** This paper tries to tackle an essential issue in AI regulation by encouraging safer model deployment." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a new AI regulatory framework known as the Safety-Incentivized Regulatory Auction (SIRA), designed as an all-pay auction. SIRA aims to motivate model-building agents to prioritize safety beyond a minimum threshold by formulating the AI regulatory process as an asymmetric all-pay auction with incomplete information. In this framework, agents submit their models to a regulator, and those that meet or exceed a specified safety level become eligible for deployment and may also receive additional rewards based on the randomized pair comparison result. The authors theoretically prove that under some assumptions and when all agents adopt a certain strategy, the system reaches a Nash Equilibrium. 
Empirical results indicate that when safety threshold prices are in the middle (0.2~0.8), SIRA enhances safety compliance and agent participation by 20% and 15%, respectively compared with the basic regulatory method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Rationality of the auction framework.** Considering the regulation process as an all-pay auction is not reasonable, at least in my opinion. Intuitively, safety-driven regulation establishes a minimum cost for the model-building agent. Every model-building agent must incur this cost, regardless of whether it can successfully meet the regulatory requirements. This represents an unavoidable exploration process within the model space. Even if we assume that all competitive agents know how to train their models to meet the safety threshold, accurately estimating the value of deployment remains a challenge. Thus, the framework may be overly simplistic in its approach to \"safety\" regulation.\n\n**Feasibility of Assumptions 1 and 2.** Assumption 1 fails when a model-building agent maliciously injects backdoor triggers into the model by altering the training dataset. Assumption 2 is also not straightforward. More cost (e.g., computational resources) does not necessarily equate to better safety. Safety also depends on other factors, such as the learning paradigm, model architecture, loss function design, and hyperparameter selection.\n\n**Performance at high thresholds.** As highlighted in the experiments, SIRA demonstrates limited advantages when safety thresholds approach the upper range (e.g., above 0.8), where its performance is similar to that of simpler reserve threshold models.\n\n1. Evan Hubinger, et al., Sleeper Agents: Training Deceptive LLMs that Persist Through Safety Training, arXiv:2401.05566." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. What is the technical challenge in the considered auction problem for AI models, compared to classic auction problems?\n\n2. Practical AI models are often very large. How can the safety of these model be evaluated? Given that the auction is done in a one shot setting, probably it is fine even if the model is large.\n\n3. I am more concerned about the compensation $v_i^p$, which needs to be provided by a regulator to implement the proposed auction algorithm. Why is this practical for existing AI models? How large does the compensation need to be? According to bidding equilibrium in Theorem 2, $v_i^p$ needs to be large for safer models. How could this be made up to compensate what the commercial AI models could achieve?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The topic considered in this paper is interesting and important. Regulations are needed to ensure AI safety.\n\n2. Theoretical results are provided whose proofs can be found in the appendix. I didn't check all the mathematical proofs.\n\n3. The paper is overall well-written and well motivated." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the challenges regulators face, particularly with the deployment of large language models that can amplify misinformation and societal division. It highlights the urgent need for effective regulatory frameworks to mitigate these risks and enhance user safety. Observing a gap in the availability of rigorous and realistic mathematical frameworks for AI regulation, the authors propose an innovative auction-based regulatory mechanism. This mechanism is designed to incentivize the development and deployment of safer AI models and encourage active participation in the regulatory process. It demonstrates through derived Nash Equilibria that the proposed auction mechanism effectively ensures that each participating agent’s optimal strategy aligns with submitting a model that exceeds a set minimum-safety threshold." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The way used by the paper to model the safety may not be realistic. It is assumed to be some safety level $s_i$ of a model $w_i$, which is expected to be less than $\\epsilon$. How is the safety measured for AI models using the metric mapping $S$ in practice? For common foundation models and LLMs, it might be hard to evaluate $S$ for $w_i$, especially given the size of $w_i$. What if a model provider take advantage of the inaccuracy of the safety evaluation to benefit itself?\n\n2. The proposed auction algorithm, together with the theoretical results and analysis seem quite standard. How does it differ from the classic all-pay auction results (for instance, Amann et al. 1996) in the setting for AI models? 
It is worth highlighting the technical novelty and emphasize why the proposed method is needed for AI models, given that it is claimed in Line 398-399 that \"To the best of our knowledge there are no other comparable mechanisms for safety regulation in AI.\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Beyond the questions listed in the Weakness section, here are some additional questions I have:\n- The framework assumes that the cost $M$ is the same across agents. This assumption seems unrealistic in practice, given that different agents may have varying models, training procedures, and resources, which makes the cost of aligning the safety levels different. If $M$ differs across agents, is there a way to adapt the framework to accommodate heterogeneous costs while maintaining its theoretical properties? \n- The paper didn't mention incentive compatibility, a key issue in auction literature. Is truthful report of $b_i$ guaranteed?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- A novel and important question, and strong motivation\n- Sound theoretical analysis\n- Genrally well-written" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors provides a formulation of the AI regulatory process as an all-pay auction, and design an auction-based regulatory mechanism that produces Nash Equilibria that induces safety considerations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors' formulation of the regulatory process and safety components appears to be somewhat simplified and may diverge from current AI developments in a few key ways:\n- (Minor) The authors assume a fixed safety threshold, denoted as $\\epsilon$, for model development. While this may hold in domains such as drug approvals or medical equipment (as illustrated by the authors' N95 mask example), applying a similar framework to AI models is more challenging and complex.\n- (Minor) The model assumes that the test set used by regulators is drawn from the same distribution as the agent’s evaluation data. However, in the specific context of language models, techniques such as fine-tuning and reinforcement learning from human feedback (RLHF) can easily improve performance metrics if the evaluation distribution remains consistent. This weakens the argument that a single scalar value would sufficiently capture the intricacies of regulatory inspection.\n- The authors propose a strictly increasing relationship between safety and cost, arguing that \"safer models cost more to develop.\" However, they do not explicitly account for the trade-off between safety and the model's quality or usefulness in their framework. 
This omission raises questions, particularly since existing alignment approaches (e.g., RLHF) are often designed to balance helpfulness and harmlessness. In practice, a model could be made extremely safe (e.g., by providing only generic responses), but this could significantly reduce its usefulness without necessarily increasing development costs. In fact, under the authors' framework, one could submit a trivial model (e.g., one that always responds with \"Thank you for your input\"), bid the highest possible value, and meet the safety threshold $\\epsilon$ to claim the regulator's compensation. This suggests that achieving safety in some cases may not necessarily be costly unless the model’s quality or usefulness is held constant.\n- This issue could be exacerbated by the presence of open-source models like LLaMA, which may further incentivize the \"gaming\" of the regulatory system. Agents could enter the competition with low-cost variants of open-source models that prioritize safety at the expense of quality, primarily to secure the regulator’s compensation. Put it in a different way, low-quality models (which are safe but not useful) could flood the regulatory system, making it easier to claim compensation without delivering valuable AI products. This could distort incentives, where participants optimize for regulatory approval rather than producing high-quality, well-rounded models.\n\nFor the mechanism itself, a minor concern include the use of randomization, which introduces envy into the mechanism. With development costs potentially huge, this might lead to issues and discontent and distrust with the mechanism after the outcome is realized." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose an auction-based regulatory mechanism that incentivizes agents to develop and deploy safer AI models." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024auctionbased,\ntitle={Auction-Based Regulation for Artificial Intelligence},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=06GH83hDIv},\nnote={under review}\n}" }, "abstract": { "value": "In an era of \"moving fast and breaking things\", regulators have moved slowly to pick up the safety, bias, and legal pieces left in the wake of broken Artificial Intelligence (AI) deployment. Since AI models, such as large language models, are able to push misinformation and stoke division within our society, it is imperative for regulators to employ a framework that mitigates these dangers and ensures user safety. While there is much-warranted discussion about how to address the safety, bias, and legal woes of state-of-the-art AI models, the number of rigorous and realistic mathematical frameworks to regulate AI safety is lacking. We take on this challenge, proposing an auction-based regulatory mechanism that provably incentivizes model-building agents (i) to deploy safer models and (ii) to participate in the regulation process. We provably guarantee, via derived Nash Equilibria, that each participating agent's best strategy is to submit a model safer than a prescribed minimum-safety threshold. Empirical results show that our regulatory auction boosts safety and participation rates by 20% and 15% respectively, outperforming simple regulatory frameworks that merely enforce minimum safety standards." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Regulation", "Mechanisms", "Auctions", "Artificial Intelligence" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/712d0f5a1b8c06931f9d87edc2b1482e3b12a100.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/19ff95c79f6c6d7867316e5b6e85d1d4b88dec50.zip" }, "title": { "value": "Auction-Based Regulation for Artificial Intelligence" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
06ZvHHBR0i
Adversarial Multi-Agent Evaluation of Large Language Models through Iterative Debate
main
Active
LLM Evals;Adversarial analysis;Mechanism Design
foundation or frontier models, including LLMs
1;3;3;3
5;4;5;4
1;1;1;2
1;1;1;2
1;2;3;1
2.5
4.5
1.25
1.25
1.75
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- The Qwen and Gemini model versions should be specified." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The method incorporates insights from a legal decision-making perspective, and provide two frameworks that simulate the human workflow in court." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents two multi-agent systems inspired by courtroom for evaluating the outputs of LLMs. The experiments show the proposed frameworks improve accuracy compared with a single LLM as a judge." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Lack of experiments: More evaluation datasets and baselines should be incorporated into the experiments. For example, LLM-based multi-agent evaluators such as PRD [1] and ChatEval [2] could be baselines. There are many datasets in this LLM-based evaluator topic, such as AlignBench [3], AUTO-J [4] and LLMEval [5].\n\n- The presentation needs to be refined: \n\n(a) The background (in both Section 1 and Section 2) is taking up too much space. This background can be concluded to make space for evaluation details in Appendix D. 
\n\n**(b) The Conclusion section and Appendix A.5 are likely to be AI-generated (according to GPTZero).**\n\n- The multi-agent systems will surely use more tokens compared to LLM-as-a-judge. What is the cost per run compared to other multi-agent frameworks (such as PRD [1] and ChatEval [2])?\n\n[1] PRD: Peer Rank and Discussion Improve Large Language Model based Evaluations https://arxiv.org/abs/2307.02762\n\n[2] ChatEval: Towards Better LLM-based Evaluators through Multi-Agent Debate https://arxiv.org/abs/2308.07201\n\n[3] AlignBench: Benchmarking Chinese Alignment of Large Language Models https://aclanthology.org/2024.acl-long.624/\n\n[4] Generative Judge for Evaluating Alignment https://arxiv.org/abs/2310.05470\n\n[5] LLMEval: A Preliminary Study on How to Evaluate Large Language Models https://ojs.aaai.org/index.php/AAAI/article/view/29934" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. [results] Did the authors analyze the different types of arguments and justifications between the different ensembles in scoring answers?\n2. [results] Were there any question-answer pairs for which the ensemble methods performed particularly better than the single-judge baseline?\n3. [experiments] How many tokens were needed on average for the different ensembles and models studied?\n4. 
Section 3.5 is entirely in the appendix, yet referred to in the conclusion [line 499] as discussed. At the minimum, discuss the main results of a section in the main text when referring to it in the conclusion.\n5. [line 504-505] “we have conducted … our framework”: where?\n6. [C.2-C.3] How were any of these chosen? They seem completely arbitrary and unmotivated." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "[clarity] The work was generally easy to read with crisp writing. \n \n[significance] Exploring ways to improve the evaluation of LLM outputs is an important research direction that was well motivated by the authors." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work explores different processes of rating large language model (LLM) outputs using LLMs. Inspired by legal, psychological, and decision theory, the authors propose two such processes: (1) “Multi-Advocate One-Round Evaluations” (MORE) and “Single Advocate Multi-Round Evaluation” (“SAMRE”). Given a question and two (LLM) outputs, each process uses LLMs in different roles, e.g., as advocates, jurors, or judges, to (iteratively) select the “best” output. The authors further present two theorems respectively claiming that (1) aggregated multi-advocate arguments lead to greater score differentiation than those obtained using iterative debates, and (2) that multi-advocate argumentation requires fewer rounds of interaction to receive the same level of score differentiation as iterative debate. The two processes are tested on the MT-Bench dataset and compared to a baseline of a single LLM judge process using six different LLMs. The authors conclude that their experimental results provide strong empirical evidence for their proposed methods." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "[originality] Several works have proposed different ways of using LLM ensembles to evaluate LLM outputs. While the authors spend considerable time discussing connections to various disciplines, e.g., decision theory, legal discourse, and psychology, few tangible insights are presented as to how this specific ensemble utilizes these disciplines.\n \n[quality] The experimental results presented in this work simply do not pass the bar for this conference: (1) Only a single, limited dataset is used, (2) critical experimental details are missing, e.g., number of samples used, confidence intervals, temperatures, single-judge baseline setting, length of argument outputs, etc., (3) none of the presented theorems are tested in the experiments, e.g., claims like “greater score differentiation” and “complexity” are neither quantitatively nor qualitatively discussed in the experiments, (4) prompt sensitivity and selection is not discussed at all. This is especially damning for a work focused on improving evaluation.\n \n[significance] In essence, the work proposes (iterative) ensemble scoring using LLMs. The claim of [line 481] “strong empirical evidence for the effectiveness of the proposed LLM advocate architectures in improving the accuracy of LLM output evaluation” is greatly exaggerated and unsupported. There is good reason to believe that most of the reported improvements over a single LLM-as-judge baseline come from the greatly expanded compute budget and the series of hand-crafted prompts. Similar results might thus be obtained by simply providing a single LLM an expanded compute budget and chain-of-thought style reasoning prompts.\n \n[clarity] While the presented theorems and proofs in the appendix are an admirable attempt at introducing rigor to LLM-ensemble evaluation. 
Yet, they also display a limited understanding of the many practical considerations in using LLMs and the large existing literature documenting poorly understood LLM behaviors. Sweeping, unmotivated assumptions like those on line 321, or the assumption that LLMs assigned different “persona prompts” logically will obtain more diverse and stronger arguments limit the usefulness of the presented theorems." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "see summary" }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "see summary" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "I suspect that this paper may have been generated by a generative AI (such as ChatGPT). The evidence supporting this suspicion includes:\n\n1. The title of the PDF differs from the title listed on OpenReview.\n2. A significant portion of the literature cited appears to be fabricated. While I have not verified every citation, most of the references listed from 2023 onwards seem likely to be fake.\"\n\n\nFor examples:\n\n[10] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Eric Wong, Zihang Zhang, Andy Zou, Lianmin Zheng, Siyuan Yu, Yi Tian, Yinghai Zhu, et al. 
Chatbot arena: Benchmarking open large language models in the wild. arXiv preprint arXiv:2306.01670, 2024.\n\n[25] T. Lanctot, A. Charnock, and J. Badger. Evaluating multi-agent systems in language models. In NeurIPS 2023 Workshop on Multi-Agent Systems, 2023.\n\n[26] Y. Li, D. Chen, and T. Brown. Agents as evaluators: The role of multi-agent systems in llm assessment. In Proceedings of the 2024 Conference on Neural Information Processing Systems (NeurIPS), 2024.\n\n[34] S. M. Panickssery, E. Lee, and K. Lee. Llm-based evaluators for language models: Opportunities and challenges. In Proceedings of the 2024 International Conference on Learning Representations (ICLR), 2024." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "see summary" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- In the proposed MORE framework, why employ three advocates for each answer? Are these advocates different in any way? Additionally, why does judge J provide scores s_1 and s_2 for both answers at the same time (Line 245)? Does this introduce additional bias? I assume the distributions of s_1 and s_2 obtained this way differ from the distributions obtained if s_1 and s_2 were assessed separately.\n\n- What prompts are used for jurors with different backgrounds? 
I also question whether merely assigning an identity through the prompt (e.g., \"A retired professor of ethics\") allows the LLM’s evaluation to truly represent the standards of that demographic. This method’s effectiveness requires further validation.\n\n- Could the authors provide an example for the stopping mechanism (Lines 262-263)?\n\n- Why does Algorithm 2 discuss the case of three jurors, when the authors claim five diverse jurors (Line 253)? The authors need to provide the correct version of Algorithm 2.\n\n- Why does the performance of the SAMRE architecture without juries in Table 1 surpass that of SAMRE?\n\n#### Minor Problems\n- The authors should cite reference papers for the theories mentioned in Lines 036-039.\n\n- The authors should clarify the version of the LLMs reported in Table 1. For example, the version of Qwen." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The writing is relatively fluent." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a framework that interprets Large Language Models (LLMs) as advocates within an ensemble of interacting agents, allowing them to defend their answers and reach conclusions through a judge and jury system." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The framework proposed by the authors can be seen as an implementation of a multi-agent approach in the field of LLM-as-judges, with limited novelty and contribution to the community.\n\n- There is a lack of detailed description and justification for the proposed framework, with specific issues highlighted in the Questions section below.\n\n- The authors mentioned probabilistic modeling as one of the key contributions in the abstract (Line 018), but only dedicated a single sentence to this aspect in the main text (Line 395).\n\n- The authors conducted only one experiment, comparing the accuracy of their designed framework with a simple baseline, which is insufficient to support their claims. I suggest that the authors add the following experiments and comparison methods:\n - **Comparison methods:**\n - LLMs specifically trained for evaluation, such as PandaLM or Prometheus model.\n - Multiple LLM evaluators using a majority voting strategy.\n - **Experiments:**\n - A comparison of the API and time costs between the proposed MORE and SAMRE frameworks and the aforementioned comparison methods.\n - A performance comparison of the MORE and SAMRE frameworks under different parameter settings (e.g., the number of advocates).\n - A bias analysis comparing the MORE and SAMRE frameworks with the aforementioned comparison methods to demonstrate the claim of being \"unbiased\" (Line 116) and mitigating the influence of strategic behavior and individual biases (Line 119)." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024adversarial,\ntitle={Adversarial Multi-Agent Evaluation of Large Language Models through Iterative Debate},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=06ZvHHBR0i},\nnote={under review}\n}" }, "abstract": { "value": "We propose a novel framework for evaluating large language model (LLM) outputs using LLMs themselves as interacting agents in an adversarial debate system. Our approach casts LLMs as advocates, judges, and juries within a structured courtroom-inspired setting. Advocate LLMs engage in iterative argumentation to refine and critique responses, while judge and jury LLMs moderate and assess the debate. We introduce a probabilistic model using Beta-Binomial distribution to analyze error reduction dynamics in this iterative process. Comparative studies of ranking versus scoring methods for LLM jurors reveal advantages of fine-grained scoring in capturing nuanced quality assessments. Experiments across diverse language tasks demonstrate our framework's superior performance in agreement with human judgments and provision of interpretable feedback compared to traditional evaluation methods. This work contributes a theoretically grounded, scalable approach to LLM evaluation that addresses limitations of existing techniques and adapts to rapid advancements in language AI technologies." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "LLM Evals", "Adversarial analysis", "Mechanism Design" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/f066ebadc0dde761530628129f3872aef2a0db6b.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Adversarial Multi-Agent Evaluation of Large Language Models through Iterative Debate" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
07N9jCfIE4
The Complexity Dynamics of Grokking
main
Active
Compression;Complexity;Generalization;Grokking;Minimum Description Length
learning theory
3;3;5;5;8
4;3;3;4;4
2;2;2;3;3
2;2;2;2;4
2;2;2;3;4
4.8
3.6
2.4
2.4
2.6
0.356348
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- What is the exact definition of the novel complexity measure introduced in this paper? And for which models is this measure well-dfined. The related conversation about compression and motivation from information theory and Kolmogorov complexity is very nice but it's unclear to me exactly how this measure is defined. Is this the content of Algorithm 2? Does the output of Algorithm 2 define the complexity measure? \n - in line 400, can you clarify which subset of grokking experiments you used. And why you used this subset. \n - in line 358 you state \"..we show that regularizing the spectral entropy leads to grokking..\" Is this an overstatement? How exactly is grokking defined quantitatively?\n - In Figure 3, you compare your regularization technique with weight decay. What is the dependence of the proposed spectral entropy regularization on the regularization weight? What behavior do you notice as you apply more or less spectral regularization? It would be nice to see the effect as the regularizaiton of the spectral entropy gradually increases.\n - Does Figure 4 include multiple seeds? Why are error bars not visible in this plot?\n\ntypos/nits\n - in Figure 2. Why include the \"ours\" distinction when all plots are \"ours\". 
\n - line 372, \"ideas\" to \"ideal\"" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Understanding the role of model complexity and how it should be measured is an important question in machine learning. This paper takes a good step in this direction and presents a compelling case for a complexity measure which is defined using the minimum description length and ideas from compression and information theory. The paper contributes to a deeper understanding of this 'grokking' phenomenon, which has gotten significant attention in recent years. \n\nThe paper has good theoretical motivation and makes an interesting connection with the concept of grokking in machine learning. Their intrinsic complexity measure and regularization technique are well-grounded in theoretical concepts from information theory. The authors provide clear explanations and justifications for their design choices.\n\nThe paper is logically structured and well-written and supports their theoretical claims with experiments on synthetic tasks, like modular arithmetic, for decoder transformer models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a new complexity measure for neural networks and claim that this complexity measure can be used to explain 'grokking'. \"Grokking\" in machine learning is this idea that neural networks suddenly transition from memorization to generalization long after overfitting the training data. They show that their complexity measure correlates with this 'grokking' and then show how this complexity measure can be used to define a new regularization method which encourages low-rank representations. This regularizer is defined using the spectral entropy of the network weights." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The complexity measure defined and explored in this paper is positioned as a way to 'explain grokking'. \n\nComparison with other complexity measures. The empirical results in the paper are nice. But it would be good to have a fair comparison of how other complexity measures look when measure in the same scenarios. It's unfair to say that this new complexity measure \"explains\" grokking without uncovering a scenario where this complexity measure is able to capture this behavior where others are not. Otherwise, it's unclear if this is just a correlation relationship with the perceived behavior of 'grokking'. \n\nLacking discussion of the cost for computing this complexity measure. If I understand correctly, the proposed complexity measure involves a Bayesian optimization procedure for finding the optimal compression parameters, which could be computationally expensive. It would be nice to address or (ideally) investigating how difficult this measure is. This would enhance the practicality of the approach.\n\nFrom what I understand, this complexity measure is somewhat dependent on the hyperparameters, in particular the per-layer truncation threshold $\\kappa(\\tau)$. It would be nice ot have a detailed analysis even experimentally of the sensitivity to this threshold.\n\nThis paper has some very nice ideas and is worth exploring but it would be good to have a section on Limitations of their approach with an honest assessment in terms of other complexity measures and the degree to which the results are not just correlational with this 'grokking' behavior. \n\nThe paper is carefully written and has a nice re-cap of the relevant ideas from information theory and compression in ML. However, the main message of the paper was at times hard to find. For example, what is the exact definition of this new complexity? 
I understand it relies on coarse-graning of the network and compression using bzip2 and I think the size of the compressed network is the proxy for the complexity. Is that the definition? This paper would benefit from more clear exposition in this respect." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Have you tried applying these ideas to more complex datasets, does it compare favorably vs weight decay techniques ?\n\nBzip is not ideal to compress weights... are there other points of comparisons available?\n\nWhat is the efficiency of your compression method? How long does it take to compress?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Excellent writing, compelling ideas, nice experiments, convincing thesis, possible follow-ups." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the phenomenon of grokking through the lens of complexity theory and rate distortion theory. It proposes ways to compress model weights: \n-- Via a parameter quantization operation, as a twist on ideas of Hinton and Van Camp\n-- Via a low-rank approximation operation.\nThe idea is compress the models up to certain rate distortion thresholds, quantified by the loss. 
\nThey find that this compression is substantially more powerful than traditional compression methods (bzip) and argue that this is a better approximation of the Kolmogorov's complexity of the model.\nUsing this metric, the authors perform experiments on arithmetic operations and find that the grokking phase is associated with a drop from the complexity peak. Following this idea, they propose a new regularizer that apparently increases the grokking effect.\n\nOverall, this is a very well-written paper that lays out super interesting ideas and presents a compelling thesis and nice experiments. I am not sold on the idea that this is an explanation of grokking, but the observations and the conclusions are overall very interesting and I think this is a valuable contribution to understanding better what happens with grokking and is quite promising to improve learning performance of models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Is it really an explanation of grokking or more some interesting and attractive observations?\nThe experiments with the regularizer are not many." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How did you set the learning rates for experiments? Does the performance of entropy regularization vary with different learning rates?\n2. 
While entropy regularization surely helps in compressing the model, I expect that both the usual L2 regularization and the entropy regularization will achieve perfect test accuracy. Could you think of a scenario where the proposed regularization technique offers a clear performance advantage over L2 regularization?\n3. Will entropy regularization also help in training larger models with more complicated datasets, where they often do not have simple representations as one-dimensional numbers?\n4. Could the computational overhead of low-rank optimization become significant, especially when applied to large models? If so, how could we mitigate them?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper is generally clear, and easy to read and interpret.\n- The paper provides nice intuitions on building generalizable neural networks, especially from the model complexity perspective.\n- The paper considers an interesting set of techniques for model compression with minimal performance loss, and tests them with experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a measure of neural networks’ complexity, and show that grokking could be explained by the rise and fall of the model’s complexity. The authors also propose methods for compressing neural networks via quantization and spectral entropy-based regularization, and empirically demonstrate their performances with modular arithmetic tasks." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the paper considers several promising ideas for model compression, there are a few limitations:\n- While the complexity explanation of grokking is interesting, it seems to overlap with the circuit efficiency explanation proposed by Varma et al. (2023). Although the authors acknowledge that model complexity is not exactly identical to efficiency or parameter norms, the added insights in this area feel somewhat limited.\n- The proposed model compression methods are quite similar to existing techniques on quantization and low-rank approximations, which raises questions about the novelty of the approach. Spectral entropy-based regularization is an interesting idea, but concerns about potential computational overhead and their applicability in more complex settings remain.\n- Lastly, the applicability of entropy regularization techniques in more complex problems beyond the modular arithmetic task raises some concerns. Additional evidence or analysis demonstrating how this technique can advance the complexity-performance Pareto frontier in more difficult tasks will strengthen the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* What's the key motivation of this paper?\n* Could you elaborate on the comparison with bzip2? 
What is being compressed, problem setup, compressed file size, etc.?\n* What practical implications does this paper have? I would consider a method practically useful if: (1) it can speed up grokking and/or (2) it can compress real-world datasets better than baselines." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The paper is well-written and very readable\n* The paper presents \"new\" theoretical tools to analyze neural networks\n* The analysis is a new angle to understand grokking" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes to study the grokking dynamics via the lens of information theory (minimum description length). In particular, they proposed: (1) a new compression algorithm to compress the neural network; (2) a new regularizer based on spectral entropy. They show that the spectral entropy regularizer outperforms the standard weight decay to the extent that a model with lower complexity is obtained. They claimed a factor of 30-40x improvement of the compression ratio over bzip2, which is impressive (although I can't find the file size data). However, none of the compression methods achieve a non-vacuous bound, since models are vastly over-parametrized." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* This paper deals with too many things simultaneously, which makes me a bit lost. What's *the* motivation of this paper? Otherwise, the paper reads like a collection of ok-ish results but none of them is impressive enough. For example, the idea of grokking as compression has been explored by [Liu et al.], [Humayun et al.] and [Deletang et al.]. 
The idea of using spectral entropy as a measure is explored in [Liu2 et al.], although it is novel to regularize the network with spectral entropy (which is unfortunately expensive).\n* The papers claim a 30-40x improvement in compression ratio, but I did not find and details or data. \n* Although this is a more theoretical paper than an experimental paper, I am not sure about its practical implications.\n\n**References**\n\n[Liu et al] Grokking as Compression: A Nonlinear Complexity Perspective, arXiv: 2310.05918\n\n[Delétang et al.] Language Modeling Is Compression, ICLR 2024\n\n[Humayun et al] Deep Networks Always Grok and Here is Why, arXiv: 2402.15555\n\n[Liu2 et al] Towards Understanding Grokking: An Effective Theory of Representation Learning, NeurIPS 2022" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Is it possible to perform the same experiments on more complex but still relatively simple datasets like MNIST or CIFAR10?\n\n - Does the generalization bound of Equation (4) only hold for finite hypothesis spaces? If yes is that a realistic assumption in practical learning settings? Moreover, could you be more precise as to why the choice of Solomonoff prior should lead to tighter bounds than other priors, such as the uniform prior over $\\mathcal{H}$?\n\n - Line 181: Why can the empirical risk be understood as the entropy of the data under the model? 
Is there a way to formalize this fact?\n\n - Is it possible to obtain a formal statement relating the information capacity (Equation (9)) to generalization?\n\n - To what size and precision do the parameters $\\lambda$ and $\\delta$ (Section 4) refer to in practice?\n\n - How would the training accuracy be affected by the addition of Gaussian noise in practical deep learning settings?\n\n - Can you define more precisely the notations used in Algorithm 2, such as BO.SUGGESTPARAMETERS()? More generally, can you provide more details on the Bayesian optimization procedure?\n\n - Does your regularization technique always lead to lower test accuracy compared to weight decay?\n\n - Figures 3 and 5 are not analyzed in the text, can you add some insights on the result they present?\n\n**Remarks/questions regarding lines 152 - 155 and Equation (4)** \nEven though it is not central to the paper, I have some questions about this part:\nAs I understand it, the bounds in terms of Kolmogorov complexity are obtained by choosing a good prior distribution in the bound of Langford and Seeger. It is not clear to me that such a choice of prior provides the most useful bound. More precisely, let $\\mathcal{H}$ be a finite set of hypothesis and $\\sigma : \\mathcal{H} \\to \\mathcal{H}$ be any bijection of $H$. Then $h \\mapsto 2^{K(\\sigma(h))}$ may be used as a prior instead of the usual Solomonoff prior, hence leading to a generalization bound in terms of $K(\\sigma (h))$. Yet another possibility would be to use the uniform prior over $\\mathcal{H}$. Therefore, choice of prior, and therefore the choice of Kolmogorov complexity as a generalization measure, seems to be arbitrary (please correct me if I am mistaken). Can you provide more insights as to why this leads to the most informative bound? 
\n\nI would be happy to discuss this further, please correct me if I misunderstood something.\n\n\n**Other minor remarks and typos**\n\n - In the introduction, the terms capacity and complexity are used before being defined, which may render the introduction hard to read. In general, more formal definitiosn of these concepts might enhance the readability of the paper. It could also help to define the notion of distortion function.\n\n - Line 122: regulariztion $\\to$ regularization\n\n - Equation (4): there is a missing parenthesis in $\\log(1/\\delta)$\n\n - There might be a clash of notation between the parameter $\\delta$ in Equations (4), (9) and (10). It would be clearer to use a different letter in each of these equations." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Grokking is an important topic for the community\n\n- The experiments suggest that the proposed regularization technique based on spectral entropy may induce grokking, which may be of practical interest.\n\n- The experiments suggest that the rise and fall of the proposed complexity seems to be predictive of when the model starts to generalize.\n\n- The proposed regularization techniques lead to better generalization bounds than classical weight decay or no regularization." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the grokking phenomenon through compression-based approaches. Inspired by recent work on the intrinsic complexity of neural networks, and combining it with ideas from rate-distortion, quantization and low-rank approximation, the authors propose a new measure of neural networks complexity, which consists essentially of a coarse-graining procedure. 
They conduct experiments on simple arithmetic tasks which demonstrate that the rise and fall of this complexity might be predictive of the network starting to generalize. Moreover, this leads them to propose a new regularization scheme, based on spectral entropy, whose effect seems to reduce the total description length and the generalization bound, compared to other methods. This might lead to non-vacuous generalization bounds." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Several notions are mentioned repeatedly but without being formally defined, such as capacity, distortion or $(\\lambda,\\delta)$ (Equation (9)). It would improve the paper to include additional theoretical background and more formal definitions. \n\n - It should be made clearer how the quantities introduced in Sections 3.1 and 4 are related to generalization. For instance, is it possible to write down a theorem with explicit dependence on these quantities, or are their consideration partially based on intuitions? Can the link of these quantities with Kolmogorov complexity be made more formal?\n\n - Despite the lack of formal theorems and proofs, the experiments are done on very simple arithmetic tasks. Therefore, it is not clear (neither theoretically nor empirically) whether the results may be generalized to more complex settings. I think that at least one experiment on a small dataset like MNIST or CIFAR10 could improve the paper.\n\n - It would be useful to include an experiment comparing the performance (in terms of accuracy) with and without the proposed regularization scheme. Indeed, we see that it reduces the MDL and the generalization bound, but, if I am correct, it is not clear whether it achieves better performance overall.\n\n - We see in Figure 4 that the proposed regularization scheme achieves the lowest complexity. 
However, the complexity is computed by Algorithm 2 and the proposed regularization is precisely penalizing the quantity computed by algorithm 2. Therefore it does not seem surprising that it is the lowest. As an ablation study, it would be interesting to make the comparison using other complexity notions. For instance, using the actual test accuracy would be very informative, to see whether the proposed regularization leads to better performance." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We track the complexity dynamics of neural networks during training to understand grokking, using insights from the theory of Kolmogorov complexity." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024the,\ntitle={The Complexity Dynamics of Grokking},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=07N9jCfIE4},\nnote={under review}\n}" }, "abstract": { "value": "We investigate the phenomenon of generalization through the lens of compression. In particular, we study the complexity dynamics of neural networks to explain \\emph{grokking}, where networks suddenly transition from memorizing to generalizing solutions long after over-fitting the training data. To this end we introduce a new measure of intrinsic complexity for neural networks based on the theory of Kolmogorov complexity. Tracking this metric throughout network training, we find a consistent pattern in training dynamics, consisting of a rise and fall in complexity. We demonstrate that this corresponds to memorization followed by generalization. Based on insights from rate--distortion theory and the minimum description length principle, we lay out a principled approach to lossy compression of neural networks, and connect our complexity measure to explicit generalization bounds. 
Based on a careful analysis of information capacity in neural networks, we propose a new regularization method which encourages networks towards low-rank representations by penalizing their spectral entropy, and find that our regularizer outperforms baselines in total compression of the dataset." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Compression", "Complexity", "Generalization", "Grokking", "Minimum Description Length" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/40834cdeb5c6c11ccfa1eb404acfe0410ee2fa7b.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "The Complexity Dynamics of Grokking" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
07ZaA3MiL0
Consistent Iterative Denoising for Robot Manipulation
main
Active
robot manipulation;consistent iterative denoising;diffusion model;imitation learning
applications to robotics, autonomy, planning
3;3;5;6
4;3;3;3
1;2;2;3
1;2;2;3
1;1;3;3
4.25
3.25
2
2
2
-0.555556
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "The authors are encouraged to address the weaknesses identified in this review and provide clarifications on the points raised in the weaknesses section." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The proposal of a deterministic denoising schedule using an SDF is an interesting alternative to traditional diffusion methods. This approach has the potential to enhance temporal consistency and guide the denoising process more directly towards the ground truth action. The ablation studies presented provide evidence supporting the effectiveness of individual components of the proposed method in specific scenarios." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel approach to denoising in diffusion models for robot manipulation tasks. The authors suggest replacing the standard noising/denoising process with a Langevin dynamics denoising field based on a signed distance function (SDF). This field serves as a deterministic gradient for denoising, aiming to improve temporal consistency and convergence to the ground truth action. Additionally, the authors introduce an alternative radial loss function to optimize the denoising network. 
The method is evaluated on RLBench in simulation.\n\nThe paper presents a potentially novel idea by introducing an SDF-based denoising field for diffusion in robot manipulation tasks. However, the clarity of the writing and the consistency of the mathematical formulations require improvement to ensure a thorough understanding of the proposed method. The limited number of trials and the ambiguities surrounding the evaluation metrics raise concerns about the robustness of the results. The authors should address the discrepancies observed in the convergence behavior and provide a more thorough explanation of the method's capabilities and limitations. Addressing the weaknesses and questions identified in this review would significantly strengthen the paper's contribution and impact." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The applicability of the proposed method appears limited to 2D robotics tasks with end-effector movements, such as tabletop manipulation. The authors do not demonstrate how this approach can be extended to other types of actuations, such as gripper control.\n2. The paper seems to present a potential misunderstanding regarding the capabilities of diffusion models. It is suggested that diffusion models may produce the same noisy action for different successful actions. However, diffusion models are capable of learning multimodal action distributions through the denoising process, even in cases of overlapping Gaussians.\n3. Figure 4 raises questions about the convergence behavior of both the proposed method and the standard diffusion model. In scenarios with multiple successful actions (represented by four red triangles), both methods appear to collapse to a single ground truth action. This behavior contradicts the expectation that these models should be able to learn a multimodal distribution and converge to all valid solutions.\n4. 
The paper lacks clarity on why the proposed method (CIDM) converges to only one ground truth action in Figure 4, despite demonstrating the ability to learn a bimodal distribution in Figure 2. It remains unclear why the method does not capture the four-modal distribution evident in the task.\n\nThere are a number of places in the text where the authors could provide clarification.\n\n### Confusing text\n\n1. Line 83: The statement \"robot manipulation prefers to sample initial actions over the entire action space\" is unclear. It is possible the authors intend to convey that the training data covers the entire action space, but the phrasing is ambiguous and requires clarification.\n2. Line 175: The phrase \"After eliminating the effects of specific successful action $\\hat{y}$\" is vague. It is unclear what is meant by \"eliminating the effects.\" Specifying the mathematical operation, such as marginalizing out $\\hat{y}$, would improve clarity.\n3. Figure 3 caption, line 337: The caption could improve significantly. It states that the list of 14 tasks tasks are \"highly representative.\" Highly representative of what?\n4. The paper lacks details on the experimental setup, particularly regarding the number of trials and seeds used for evaluation. The authors state that results are based on four trials per task, but it is unclear how many random seeds were used to ensure the reliability of the results.\n5. The metric \"success probability\" requires further explanation. If it is calculated based on four trials per task, the possible values should be limited to [0, 25, 50, 75, 100]%. However, Table 2 presents values such as 82.7%, suggesting a different calculation method or a larger number of trials.\n6. Equations 12 and 13 contain an error. The 2-norm $\\|y - \\hat{y}\\|$ cannot be less than a negative number ($c<0$).\n7. Equations 12, 13, and 14 define the denoising field in a way that seems counterintuitive. 
The denoising field should be $\\epsilon_x(y) = \\hat{y} - y$ to ensure that a single denoising step, $y + \\epsilon_x(y)$, results in the ground truth action $\\hat{y}$. The gradient should point towards the ground truth, not away from it.\n8. Line 285: The authors claim to be learning a denoising field independent of $\\hat{y}$. However, the training data includes $\\hat{y}$, suggesting that the model likely learns $\\hat{y}$ implicitly. This statement requires clarification or justification.\n9. Table 1 caption: The caption states that underlined text indicates \"suboptimal performance for each column,\". Does this mean the second-best performance or some other criterion? Additionally, not every column has an underlined number.\n\nThe paper could benefit from additional explanations and clarifications to enhance the reader's understanding of the proposed method. \n<!-- The authors could have utilized the extra to address some of the ambiguities and provide more detailed insights. -->\n\n## Minor Typos\n\n1. Line 15: \"CIDM\" is used before its introduction in line 144.\n2. Line 77: \"noises supervision signals\" should be \"noise supervision signals.\"\n3. Line 93: \"Additionally, We\" should be \"Additionally, we.\"\n4. Equation 1, line 161: If referring to the DDPM scheduler, the term inside the square root should be $(1 - \\bar{\\alpha_t})$, not $(1 - \\bar{\\alpha_t^2})$.\n5. Line 28: The statement \"Robot manipulation mainly involves two steps, acquiring effective scene representation and predicting correct actions\" oversimplifies the complexity of robot manipulation, which also involves elements of execution on hardware and reactive control." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Can the author provide more detailed explaination of figure1, I'm not sure I understood (b) correctly, especially the blue circles in it.\n\n- Equation 12 and 13 seem to have some typos, I think it should be $\\exists c>0$.\n\n- The value of denoised field (eq 14) is based on the value of 2-norm between noisy action and successful action. The implicit assumption here is that the 2-norm in the action space is well defined. This assumption is not obvious as the common action space may contains position, angle, linear velocity, angular velocity, torque... The 2-norm between two actions doesn't necessarily make sense." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper introduces a novel approach to robot manipulation using a diffusion model, addressing limitations of traditional methods by incorporating a consistent denoising field and a radial loss function.\n- Empirical rigor is demonstrated through extensive experiments on the RLBench benchmark, showing clear performance gains over baseline methods. 
The ablation studies further validate the contribution of each CIDM component, enhancing confidence in the results.\n- By addressing practical challenges in action prediction for complex robot tasks, CIDM enhances the applicability of diffusion models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a novel Consistent Iterative Denoising Model (CIDM) aimed at improving action prediction in robot manipulation tasks by addressing issues with diffusion models, specifically noise inconsistency and timestep variations. CIDM introduces two core innovations: (1) a consistent denoising field, which ensures clear denoising directions and temporal consistency across actions, and (2) a radial loss function that emphasizes actions with minimal noise to achieve more accurate iterative denoising." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- While the paper presents a novel application of iterative denoising to robot manipulation, it lacks a theoretical analysis(Like some other articles on diffusion dynamics$^{[1]}$). Highlighting unique theoretical insights or algorithmic innovations would better justify CIDM’s position in the field.\n\n- The introduction of a radial loss function, while conceptually sound, lacks comprehensive theoretical grounding or references to similar existing loss functions used in other domains. This gap makes it challenging to assess the robustness and scalability of the loss. Providing a more detailed theoretical analysis or justifying it with additional related work on spatial consistency in generative models could clarify its effectiveness.\n\n- The current evaluation focuses on RLBench, but it would significantly benefit from testing in other robotic benchmarks or real-world scenarios to assess generalization capabilities. 
Evaluating CIDM's performance across tasks with varying levels of action complexity, such as multi-step manipulation in dynamic environments, would enhance the robustness claims.\n\n- Temporal consistency is claimed to improve denoising stability across timesteps, but the scalability of this approach remains uncertain for long-duration tasks. Additional evaluations on tasks requiring extended sequences of actions (beyond 100 timesteps) could illustrate CIDM’s scalability and stability in prolonged scenarios.\n\n[1] Liu, X., Gong, C., & Liu, Q. (2022). Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. the paper mentions they use CLIP encoder to extract the embedding from text instructions and image observations. However, it doesn’t mention how they process the robot state information in their framework. Moreover, if there are multiple views, how do they fuse the embedding from different views? The paper needs to add some clarification for those details. \n2. For qualitative results, the author only shows the stack blocks tasks. It would be interesting to see more qualitative rollouts of other tasks. The paper mentions the method is good for the tasks that has multiple success actions. 
However, the failure case it show when compared to 3D Diffusor Actor in Appendix A.4 is not multi-modal actions. To solidify the paper’s claim, it is better to include some multi-modal actions example and visualize the denoising process." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper clearly illustrates the problem, their motivation to propose the new components to diffusion models and the contributions. \n2. The paper provides theoretical analysis to formalize the problem. \n3. The paper shows good results on RL Bench and does ablation studies over the different proposed components to show the importance of each part." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to solve the inconsistent noise supervision issue in diffusion models. The inconsistency comes from two sources. One source is the multi-modal action labels. The other is time-varying noise in denoising steps. They propose a novel consistent iterative denoising model and a new radial loss to address this issue. The proposed method is tested on RL Bench against other baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. In related work, authors list a lot of recent related work in diffusion models. However, some related work is summarized not very clearly. For example, “Inversion by Direct Iteration (Delbracio & Milanfar, 2023) pursues a simpler form to get rid of the limitations of traditional diffusion.” this sentence is confusing because it is not clear to me what things the paper tries to simplify and what limitations they are getting rid of. 
Another issue is that the paper mentions that the recent work in diffusion models try to speed up the denoising process and provide in-depth analysis of diffusion models. However, these are not directly related to the inconsistency problem this paper tries to solve. Therefore, I think the paper should reorganize this section so that the connection and difference between related work and the proposed method is more clear. \n2. The main advantage of the proposed method as mentioned by the paper is consistent supervision from multiple successful actions (i.e., multi-modality). However, RL Bench demonstrations is not a demonstration dataset that has obvious multi-modality. A recent paper has proposed a dataset benchmark[1] for evaluation of multi-modal behaviors. It would be interesting to see how the proposed methods and the baselines behave in this benchmark\n3. The proposed method’s improvement over previous methods on Multi-view is not very significant with 82.3% average success rate compared to RVT2’s 81.4%. For each task, the proposed method has the highest success rate only in 7 out of 16 tasks. Therefore, it seems that the performance improvement is limited. \n4. In the results sections, the paper only includes the mean but it is reasonable to also include the std for the success rate as this is usually reported in the other papers.\n\n[1] Xiaogang Jia, Denis Blessing, Xinkai Jiang, Moritz Reuss, Atalay Donat, Rudolf Lioutikov, and Gerhard Neumann. Towards diverse behaviors: A benchmark for imitation learning with human demonstrations. In The Twelfth International Conference on Learning Representations, 2024." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Which architecture did you use for the denoising network? For a fair comparison, it would be helpful to know how the architectures and the number of parameters are controlled across models." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "Diffusion models for robotic manipulation indeed behave very differently from denoising in the pixel space of conventional diffusion models for image generation. Unlike pixel-space diffusion, where values are confined within a compact [0,1] range, gripper pose space is unbounded. This often causes diffusion models to exhibit underconvergent behavior as illustrated in Figure 4. The proposed method appears to offer some mitigation for this important issue." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors argue that the denoising targets of conventional diffusion models are inconsistent, making them unsuitable for robotic manipulation tasks, as they 1) vanish near local optima and 2) are time-varying. To address this, the authors propose the Consistent Iterative Denoising Model (CIDM), which learns from a time-invariant denoising field combined with a radial loss function. 
In this proposed denoising field and radial loss function, distant GT actions have less influence than closer ones. The authors compare CIDM's performance against state-of-the-art text-conditioned visual robotic manipulation methods, such as 3D Diffuser Actor and RVT2, in the RLBench settings used in PerAct and GNFactor." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "### **Weakness 1. Lacking Probabilistic Justification**\n\nThe authors argue that the score function of conventional diffusion models being zero at local minima is **biased** and should instead always point toward the nearest target. They state:\n>\n> \"The first problem is that the score function $\\nabla_{y_t} \\log p_{t} (y_t)$ is biased as a denoising field... Since the reasonable denoising field always makes noisy action closer to its target successful\naction...\"\n> \nHowever, this claim is debatable. I would contend that CIDM itself introduces bias while conventional diffusion models are unbiased. For a model to be unbiased, the denoising field should be almost zero near saddle points, as there’s no justification for favoring one specific target. In conventional diffusion models, added noise serves to break such ties. Conversely, CIDM imposes a strong preference toward nearby targets among multiple possible answers, making the output highly sensitive to the initial conditions of the denoising process. This arbitrary choice of denoising field introduces bias in CIDM, unless the distribution of initial points is meticulously selected (as in flow-matching models). Alternatively, one could adopt the Annealed Langevin MCMC viewpoint proposed by Song & Ermon (2019). In this case, however, one should carefully choose the form of noise and denoising target so as to guarantee the learned to model to be unbiased. These considerations are not thoroughly addressed in the paper. 
Consequently, there's no assurance that the samples $y$, generated by CIDM, follow the actual target policy $y\\sim p_{data}(y|x)$. \n\n### **Weakness 2. Claimed Benefit not Well-supported**\nAs discussed in Weakness 1, CIDM introduces bias. However, as demonstrated by the Cold Diffusion paper, neural networks can still produce reasonable samples across various corruption processes, even if biased. Thus, bias isn’t necessarily detrimental when a meaningful trade-off is achieved. However, for CIDM, the specific benefits of this trade-off remain unclear.\n\nFirstly, it is questionable whether the issue presented in Figure 4 is due to inconsistent training objectives. Rather, it could be due to the inference-time denoising scheduler. For instance, I observe that increasing the number of denoising iterations or lowering the temperature at smaller noise scales often resolves the underconvergence issue shown in Figure 4. Better denoising strategies, such as DDIM, could also be an option.\n\nSecondly, the authors argue that conventional denoising target is difficult to learn, and suggest that CIDM alleviates this issue by using a more consistent target. However, I’m not convinced that inconsistency is the only factor at play here. The primary issue could instead be the precision of the action. Diffusion models often struggle with generating highly precise actions due to their inherently noisy and complicated denoising pipeline. In contrast, models specifically optimized for precision, like RVT2, outperform CIDM and 3D Diffuser Actor in precision tasks such as block stacking as suggested in the experimental result. If the authors argue that inconsistent denoising targets hinder learning, they should provide evidence that biasing the target with a more consistent approach indeed reduces learning variance, i.e., by showing that CIDM demonstrates improved data efficiency or lower performance variance across different seeds.\n\n### **Weakness 3. 
Insignificant Result**\nThe experimental results are not significant, as only 25 test episodes were conducted per task. For the 18 tasks in the PerAct setting, this amounts to 450 trials. With CIDM achieving an 82.3% success rate, the 90% confidence interval is 0.78991 ≤ p ≤ 0.85134. Thus, a 1% improvement over state-of-the-art methods like RVT2 and 3D Diffuser Actor does not offer substantial evidence of CIDM’s superiority.\n\nEven if the reported performance gain holds, it does not sufficiently justify the bias introduced by CIDM. For example, if an expert policy selects a red block with 90% probability and a yellow block with 10%, we would expect the learned policy to favor red blocks proportionally. This expectation does not hold for CIDM. Every generative model has precision-diversity trade-off, and the RLBench success rate primarily measures precision over diversity. Therefore, sacrificing sample diversity for only a 1% performance gain does not make a lot of sense for me." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024consistent,\ntitle={Consistent Iterative Denoising for Robot Manipulation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=07ZaA3MiL0},\nnote={under review}\n}" }, "abstract": { "value": "Robot manipulation in complex scenarios usually involves multiple successful actions, which requires generative models to estimate the distribution of various successful actions. 
\nIn recent years, the diffusion model has been widely studied in many robot manipulation tasks.\nHowever, the diffusion model experiences inconsistent noise supervision across various action labels and denoising timesteps, which compromises accurate action prediction.\nOn the one hand, CIDM designs new noise supervision to avoid interference between different successful actions, leading to consistent denoising directions.\nOn the other hand, CIDM unifies all denoising timesteps, avoiding inconsistent predictions of the diffusion model over different timesteps.\nMoreover, we also designed a novel radial loss to make the model focus on denoising results rather than iterative process routes.\nOur method achieves a new state-of-the-art performance on RLBench with the highest success rate of 82.3\\% on a multi-view setup and 83.9\\% on a single-view setup." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "robot manipulation", "consistent iterative denoising", "diffusion model", "imitation learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/444137bba6fc98bf0f71907783db1f09bf5eeaef.pdf" }, "presentation": null, "primary_area": { "value": "applications to robotics, autonomy, planning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Consistent Iterative Denoising for Robot Manipulation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
07cehZ97Xb
How to Build a Pre-trained Multimodal model for Simultaneously Chatting and Decision-making?
main
Active
vision language action model; decision making; autonomous driving; multimodal
applications to robotics, autonomy, planning
3;3;5
5;3;3
3;1;1
2;1;2
4;2;2
3.666667
3.666667
1.666667
1.666667
2.666667
-0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "(Q1) L154: Do you also include model textual response \\hat{w}_i, i = {1,..,H} in w_i?\n\n(Q2) Eq 1: \\hat{w} is overloaded as both the model textual response and the embeddings of text inputs\n\n(Q3) Eq 1: Each of w_i might have a different number of tokens (different from n). Do you pad them to n or is the index (n_i) dropped for brevity? That is: (w_i^1, w_i^2….w_i^{n_i}) instead of just (w_i^1, w_i^2….w_i^{n})\n\n(Q4) L222-L225: The observed phenomenon is not clear here. Referring to the appendix also doesn’t add more details, apart from the empirical observation. Can the authors describe this with an example? \n\n(Q5) VLA4CD (no-language): What is the architecture, inputs for this model? Ideally, the human question in the input and the {s_t^{l+1}, .., s_t^{l+n}} must be removed while training.\n\n(Q6) L413: How did you balance the two losses for DriverGPT4? Did you have a hyperparameter search for the loss weights similar to your approach?\n\n(Q7) L477: The reasoning here is heavily dependent on the discretization strategy used for each environment. How were the actions discretized for this environment? Was there a hyperparameter search performed to get the best strategy?\n\n(Q8) L140-142: How is this problem avoided in the current setup? 
It’s not clear in the text here.\n* Action space dimension is small, i.e., 2 (acceleration and steering) How does this scale with more variables?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "(S1) In general, the intended direction of this work, i.e., a model that can take actions while retaining the ability to generate textual responses to a user is useful. Please see weaknesses for further discussion.\n\n(S2) The technical details as presented in the paper are easy to understand and follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The current manuscript proposes to build a large language model (LLM) capable of understanding multiple modalities like text, vision, and actions; and producing them as outputs. In particular, it develops a Visual Language Action model for Chatting and Decision Making (VLA4CD) that produces continuous actions without losing its ability to chat with a user simultaneously. Notably, the action space is not discretized and kept continuous, unlike prior works in this area. The paper also demonstrates experiments on CARLA dataset to claim that this approach is effective and can provide real-time decision making compared to prior art, while retaining its text interaction capability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(W1) The current manuscript suffers from a clear lack of motivation for why we need a model that can produce both actions and also “chat” (L21, for instance) with a user. There are two main problems here:\n* Throughout, the ability of “chatting with people” (L88) has not been characterized well. It is not open-ended dialog on any topic but rather an explanation of what actions to take or why it has taken a certain action in a given situation. 
This is misleading as currently phrased.\n* Much of the motivation is around “a human driver can operate while chatting with a friend”, which does not apply to why we need a unified model. For instance, why not have an actuation model and an open-ended dialog model in the autonomous vehicle to achieve the above desired motivation? This indicates the lack of a clear motivation from an application standpoint.\n\n\n(W2) Even if one were to scope the “chatting with users” ability down to producing explanations as responses to a fixed set of templated questions (see A.10), the manuscript does not follow through via corresponding experiments. Both actions and text-generation capability has been evaluated independently, once again begging the question as to why such a unified system is useful. There are no experiments to verify the following:\n* The model actually actuates based on the textual outputs? I.e., if the model responds with “I will take the right lane in 20 mins”, does it actually do that?\n* Are these textual explanations correct/sound given the state of the environment?\n* What is the correlation of the GPT-4o score evaluation with human evaluation? \n\n\n(W3) There are some concerns around the experimental validation of the proposed methodology:\n* The reported experiments on town03 and town04 from the CARLA environment do not seem to match with any of the existing benchmarks with prior works (C, D). \n* To further exacerbate this issue, none of the baseline results are from literature and have been reported based on reproductions in this work. \n* Missing baselines, see [A] for more information.\nThis raises serious questions about the efficacy and usefulness of the proposed methods from an empirical standpoint. Why were existing, standardized benchmarks not used for model comparisons? 
Request the authors to address these concerns without which the benefits of this approach will remain unclear.\n\n\nReferences\n* [A] DriveMLM: Aligning Multi-Modal Large Language Models with Behavioral Planning States for Autonomous Driving. https://arxiv.org/pdf/2312.09245.\n* [B] Think2Drive: Efficient Reinforcement Learning by Thinking with Latent World Model for Autonomous Driving (in CARLA-v2). https://arxiv.org/pdf/2402.16720\n* [C] CARLA Autonomous Driving Leaderboard. https://leaderboard.carla.org/\n* [D] TransFuser: Imitation with Transformer-Based Sensor Fusion for Autonomous Driving. https://arxiv.org/pdf/2205.15997" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- How does the model compare to DriveGPT4 and why does DriveGPT4 do so bad? DriveGPT4 is doing exactly same thing as this model aims to do (text generation + action generation).\n- Why are there no use case of the model actually chatting? How do the authors define chatting? The example of the introduction mentions the authors are inspired by human driver talking to a friend while driving, but the model doesn't actually engage in free form chat that goes beyond a single step.\n- How does author plan to make the model robust to noise when exposed to unrestricted chat?\n- Why should we add language generation capability to VLAs? 
The motivation for that seems non-existent in this paper and there's no novel use case of the generated language.\n- Why do the authors think using separate loss for language and action generation (unlike DriveGPT4) improves the decision making performance?\n- Why do the authors only focus on the self-driving task?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The main contribution of this work is that the authors add a language generation capability to VLA in self-driving scenarios.\n- The paper is well-structured, making it easy to read and understand the authors' approach.\n- The finding that separating language and action prediction loss can improve decision-making is a significant contribution that provides valuable insights into how VLAs can be effectively trained. It is encouraging to see that this is empirically demonstrated to be useful in self-driving scenarios. However, it is concerning that introducing some language noise into the training dataset can have a considerable impact on decision-making processes. Since real-world datasets will inevitably contain substantial noise, developing methods to ensure robustness against such noise is essential for the model's practical application." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "- The authors propose VLA4CD, a model for self-driving that is capable of generating both language and action tokens. \n- VLA4CD is trained using three different types of loss functions: language generation, action generation, and image reconstruction. \n- The trained model demonstrates superior performance in both decision-making and question answering compared to models such as DriverGPT4 and OpenVLA in the gym-carla environment." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I don't quite understand why VLAs need to chat based on the author's motivation. Chatting is an inherently multi-turn conversation with a specific topic, but example of such capability of the model is completely lacking. I wonder what the authors' definition of chatting is. The model doesn't actually \"chat\" but simply outputs action description. It is far from the example in the introduction where authors want to build a model that can talk with a friend while driving.\n- Text generation has already been explored with DriveGPT4. In this paper, text generation is not used for any novel applications other than simply translating action tokens into language. I fail to understand why does the author claim text+action generation is something novel since there's already a model that does it. \n- Adding chat capabilities could potentially make the model less robust when exposed to noise in language interactions. Since the model learns to associate language with specific action tokens, any slight disruption to that association (e.g., due to noise) could significantly impair its action prediction performance. If the model can engage in unrestricted conversation, it is likely to encounter more noise, which could seriously affect its decision-making abilities, which is the most important goal of VLAs. It might be more effective for VLAs to focus solely on action prediction and incorporate chat functionality with separate models. 
With the current motivation, it seems there is no strong rationale or necessity to integrate chat capabilities into VLAs.\n- Following the point above, I feel like the paper could be better framed on how to manage loss when training VLAs, which is a much interesting topic.\n\nTypo: Figure 1 interatcion" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What is the average number of images per training and inference case?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. The problem that developing a chatting and simultanenous decision making, itself is underexplored and important.\n2. The proposed model gives both reasonable question answering output and reasonable action output." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper opposed to simulating MLLMs as human in a real-world situation that require both chatting and decision making. For example, human drive can drive safely while having conversations with passengers. This is an important application problem in autonomous driving system." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
I don't buy the idea that by simply concatenating the question answer data and action prediction data together and supervised finetune the LLaVA-like MLLM can solve the proposed problem. As described in the abstract, driving and chatting simultaneously, however the proposed LLM-based model is autoregressive decoding. From the architecture overview, we can see this proposed model can only provide a prediction after a very long answer output. No inference speedup or simultaneous decoding technology is being used or proposed to achieve this.\n2. The only contribution to me seems combining action prediction and question answer data, which is very trivial. No siginificant improvement is achieved compared with specialist model in each single task. And the approach to combine these two tasks chatting and decision making tasks are not actually achieving prediction both but sequentially.\n3. The paper neeeds to be revised in the writing. Such as notions in Figure 1 is totally missing. Training and architecture details are missing in Experiments section, etc." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose an approach to build a pre-trained multimodal model for simultaneously chatting and decision-making." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024how,\ntitle={How to Build a Pre-trained Multimodal model for Simultaneously Chatting and Decision-making?},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=07cehZ97Xb},\nnote={under review}\n}" }, "abstract": { "value": "Existing large pre-trained models typically map text input to text output in an end-to-end manner, such as ChatGPT, or map a segment of text input to a hierarchy of action decisions, such as OpenVLA. However, humans can simultaneously generate text and actions when receiving specific input signals. 
For example, a driver can make precise driving decisions while conversing with a friend in the passenger seat. Motivated by this observation, we consider the following question in this work: is it possible to construct a pre-trained model that can provide both language interaction and precise decision-making capabilities in dynamic open scenarios. We provide a definitive answer to this question by developing a new model architecture termed Visual Language Action model for Chatting and Decision Making (VLA4CD), and further demonstrating its performance in challenging automonous driving tasks. We build VLA4CD on the basis of transformer-based LLM architecture. Specifically, we leverage LoRA to fine-tune a pre-trained LLM with data of multiple modalities covering language, visual, and action. Unlike the existing LoRA operations used for LLM fine-tuning, we have designed new computational modules and training cost functions for VLA4CD. These designs enable VLA4CD to provide continuous-valued action decisions while outputting text responses. In contrast, existing LLMs can only output text responses, and current VLA models can only output action decisions. Moreover, these VLA models handle action data by discretizing and then tokenizing the discretized actions, a method unsuitable for complex decision-making tasks involving high-dimensional continuous-valued action vectors, such as autonomous driving. The extensive experimental results on the closed-loop autonomous driving platform CARLA validate that: (1) the model construction method we proposed is effective; (2) compared to the state-of-the-art VLA model, VLA4CD can provide more accurate real-time decision-making while retaining the text interaction capability inherent to LLMs." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "vision language action model; decision making; autonomous driving; multimodal" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/55d50e8073e31524d6e9cef3c6f6f2b6202a851a.pdf" }, "presentation": null, "primary_area": { "value": "applications to robotics, autonomy, planning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "How to Build a Pre-trained Multimodal model for Simultaneously Chatting and Decision-making?" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
07yvxWDSla
Synthetic continued pretraining
main
Active
large language model;synthetic data;continued pretraining
foundation or frontier models, including LLMs
6;6;8;8
3;4;3;4
3;2;4;4
2;2;3;3
2;3;3;4
7
3.5
3.25
2.5
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "It says “We generate data for pairs D_{Ei, Ej} and triplets D_{Ei, Ej, Ek} in our experiments”. I wonder if the authors have any intuition about how performance changes with the size of subset k." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "* The problem the work addresses is important.\n* Experimental results show that this method scales better than simple paraphrasing or direct pretraining, and that retrieval-augmented generation further boosts performance of this model. \n* The authors also present a theoretical model explaining EntiGraph’s log-linear scaling pattern, providing insights into the mechanics of synthetic data’s impact on learning efficiency.\n* Paper is clear and well-written." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the problem of data inefficiency in pretraining language models. Current pretraining corpora may not generalize effectively and models may benefit from structured, repeated, diverse representations of knowledge. 
\n\nThe proposed is a two-step process that (1) extracts entities from the corpus and then (2) extracts relationship information amongst a subset of the entities.\n\nExperimentation uses the QuALITY corpus and dataset, which is a benchmark for long-document reading comprehension. Evaluation compares with relevant baselines like training on the original QuALITY corpus and a corpus containing rephrasings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the experiments focus on the QuALITY corpus, it remains unclear how well this would apply to other domain-specific corpora or more complex fields (e.g., legal or math data)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "For the data generator, what type of models are necessary to have good performance? (why use GPT4 and not open-source models)\nThe paper shows that the generated data is useful, but how does it look like? (is it good quality text, factual, natural looking, ...) \nWhat is the significance of section 6?" 
}, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper does a good job at demonstrating the benefit of the synthetically generated data, by including relevant natural baselines. \nThe proposed method seem to work well and can be useful for continued pre-training tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a method to continue pretraining LLM with a synthetic data augmentation method. The method is based on expanding the training corpus with many verbalizations of the entity graph present in the training corpus. It moves from a sparsely verbalized entity graph to a more densily verbalized one by using only the source documents and prompting LLMs to generate the new tokens.\n\nThe paper shows that the method is beneficial for downstream tasks in closed- and open-book QA as well as RAG. \n\nOverall, I think the paper is worthy of acceptance, it propose a clean method with good results and the experiments are fairly convincing." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The work relies on commercial and closed-source models (GPT4) for generating the synthetic data, making this work non-reproducible. Since the data generation process is the central contribution, it would have been interesting to have insights about how well different models can perform this data generation task. \n\nThe paper proposes only extrinsic evaluation of the generated data but does not provide intrinsic measures, i.e., how good is the generated text? \n\nIn my opinion, section 6 is not particularly useful. 
It is unnecessarily mathematical, based on simplistic assumptions and does not bring useful insights (For many continuously increasing lines, there anyway exists a mixture-of-exponential that fit it)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How sensitive is the synthetic pretraining process to the specific hyperparameters used for entity extraction and relation analysis? Would tuning these parameters significantly affect the generated corpus quality?\n\n2. How does the synthetic corpus compare to a manually curated dataset in terms of quality and impact on downstream tasks?\n\n3. Could EntiGraph be used effectively in scenarios where entities are ambiguous or domain-specific (e.g., medical or legal texts)?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The proposed EntiGraph approach for generating synthetic data is well-motivated and demonstrates clear improvements in downstream performance, as shown by the experimental results.\n2. The paper includes comprehensive evaluations, including closed-book QA, instruction following, and open-book settings. The results show a significant performance improvement over baselines, validating the effectiveness of synthetic pretraining.\n3. 
The authors provide a theoretical analysis of EntiGraph's effectiveness, which aligns well with empirical observations and provides a deeper understanding of its scaling properties." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes \"synthetic continued pretraining\" to enhance language models' domain adaptation efficiency, particularly when adapting to a small corpus of domain-specific documents. The authors introduce EntiGraph, a synthetic data augmentation algorithm that constructs a knowledge graph from the entities in the original corpus and synthesizes diverse text to enhance the pretraining process. The approach was evaluated using a reading comprehension dataset, QuALITY, showing notable improvements in model performance with both closed-book and open-book scenarios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The evaluation relies on the QuALITY dataset, which may not be representative of all types of small corpora. A broader range of datasets, particularly from diverse domains, would make the results more generalizable.\n2. Although the authors attempt to mitigate hallucinations by grounding synthetic data generation in the original corpus, the risk of generating inaccurate information is inherent in using a language model for synthetic generation. This aspect needs further empirical examination, such as quantitative metrics to evaluate hallucination rates.\n3. The approach relies on using strong language models like GPT-4 for synthetic data generation. The practical feasibility of using this approach might be limited if users do not have access to such models due to their computational cost. What if it was replaced with LLama 3 8B?\n4. 
While the paper includes useful baselines such as \"Rephrase CPT,\" more comparisons with alternative data augmentation or synthetic generation methods from recent literature could strengthen the claim that EntiGraph is an effective strategy." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "The paper could be more robust if you had more than just the QuALITY dataset. It is a perennial problem to find hard datasets to work with, so I understand this may be all there is for now, but given the chance I would attempt to reproduce the results on a different set. The authors mention linear algebra (a much harder topic, I think): is there any corpus for that subject?\n\nThe presentation of how exactly you generate the text to train Llama 3 8B with EntiGraph is still a little fuzzy to me, in particular it would be nice to see some examples of what you generated. It is helpful to have the prompts, but some output always grounds the presentation. \n\nFinally, I imagine GPT-4t made errors in producing the training data--did you search for these? Even at a quick glance how often did it make errors, and what, if anything, did you do to filter them out?" 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The experiments are convincing that the EntiGraph approach improves the LLM's ability to accurately answer questions about a small corpus. In particular the closed-book results in Figure 3 show that the EntiGraph approach leads to far more salient claims per false claim than any of the other models, including GPT-4, or training the LLM (Llama 3 8B). The benefit is substantially less in the open-book RAG case, but there is still substantial improvement. The theoretical model to explain how the model improves QA accuracy with increasing tokens provides some good intuition as to how the model learns. \n\nOverall the text is clear and easy to read." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses how to train LLMs in a data scarce regime, given that LLMs require O(1000) examples of a fact to actually \"learn\" it. This has applications both to niche corpora (e.g., mathematics textbooks) as well as to training larger models once all human text is exhausted. The authors propose to use a pre-trained LLM to (1) extract entities and summaries from a comparatively small, niche corpus, and (2) use the extracted entities to generate rephrased assertions about those entities, to facilitate learning by a second (here, smaller) LLM. They experiment with a 1.3M token reading comprehension dataset, and test the approach against several baselines, including closed-book tests on the LLM used to extract entities and the rephrased text used to train the second LLM. Finally, the authors present a mathematical model through which they attempt to understand the training behavior of this data augmentation system." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I still have reservations that there is some amount of distillation of GPT-4 into their Llama 3 8B: it seems possible to me that a RAG-prompted GPT4 could generate additional information that is somehow \"unlocked\" by the RAG prompt, but which the closed-book version was unable to access. At the risk of anthropomorphizing, this is akin to a human getting a visual or audio cue and suddenly recalling whole complex memories. It would make the paper stronger to dig into the results of entity extraction and the generated text to see whether it is rephrasing/paraphrasing, or whether possibly actual new information is injected.\n\nEven so, it would have helped this reader to have pointed out the significance of the closed book experiments earlier on. It isn't stated explicitly until the Limitations section.\n\nI don't feel particularly qualified to check your proofs of theorems, and moreover I think the main value of the theoretical model is to help the reader understand intuitively why the approach works (these may be connected observations). Is all of the theory necessary? Perhaps a simulation would do as well?\n\nAnother issue is that much of the benefit of the approach vanishes (though not completely) when using a RAG model directly. Is this approach worth the extra training, given the modest gains? The core problem, really, is how many examples LLMs take to learn anything well. This paper finds a way to side-step that successfully, but doesn't solve it directly." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024synthetic,\ntitle={Synthetic continued pretraining},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=07yvxWDSla},\nnote={under review}\n}" }, "abstract": { "value": "Pretraining on large-scale, unstructured internet text enables language models to acquire a significant amount of world knowledge.\nHowever, this knowledge acquisition is data-inefficient---to learn a fact, models must be trained on hundreds to thousands of diverse representations of it.\nThis poses a challenge when adapting a pretrained model to a small corpus of domain-specific documents, where each fact may appear rarely or only once.\nWe propose to bridge this gap with synthetic continued pretraining: using the small domain-specific corpus to synthesize a large corpus more amenable to learning, and then performing continued pretraining on the synthesized corpus.\nWe instantiate this proposal with EntiGraph, a synthetic data augmentation algorithm that extracts salient entities from the source corpus and then generates diverse text by drawing connections between those entities.\nSynthetic continued pretraining with EntiGraph enables a language model to answer questions and follow generic instructions related to the source documents without access to them.\nIf the source documents are instead available at inference time, we show that the knowledge acquired through our approach compounds with retrieval-augmented generation.\nTo better understand these results, we build a simple mathematical model of EntiGraph, and show how synthetic data augmentation can \"rearrange\" knowledge to enable more data-efficient learning." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "large language model", "synthetic data", "continued pretraining" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/3f7f02cad5174828e2ae4812f3a70a8db2a199cd.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Synthetic continued pretraining" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0823rvTIhs
Weakly-Supervised Affordance Grounding Guided by Part-Level Semantic Priors
main
Active
weakly supervised affordance grounding;foundation model;pseudo label
applications to computer vision, audio, language, and other modalities
5;6;8
3;3;4
3;3;4
3;3;3
3;3;4
6.333333
3.333333
3.333333
3
3.333333
0.944911
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- The problem is important and well-motivated, as affordance grounding is crucial for robotic manipulation and human-object interaction understanding\n- The proposed pseudo-labeling approach effectively leverages existing foundation models (VLpart, SAM) to provide supervision, addressing limitations of previous CAM-based methods\n- The label refinement process using exocentric images is novel and well-designed, providing a clever way to improve initial pseudo labels\n- The reasoning module helps generalize to unseen objects, which is crucial for practical applications\n- The writing is clear and the method is well-explained with appropriate visualizations" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper tackles weakly supervised affordance grounding (WSAG) by leveraging foundation models to generate pseudo labels, departing from previous CAM-based approaches. 
The authors propose a three-stage pipeline: (1) using VLpart and SAM to generate initial pseudo labels by mapping affordance-object pairs to part names, (2) refining these labels using human-object interaction cues from exocentric images, and (3) training an affordance grounding model with the refined pseudo labels. The method also includes cross-view feature alignment and a reasoning module to handle unseen objects. The approach shows significant improvements over existing WSAG methods" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The choice of CLIP as the vision encoder could be better justified given previous work suggesting limitations (vs DINO, OWLViT, SAM). For example, the paper will be stronger with an ablation study of different visual encoders." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Could you provide more details about failure cases and limitations of the proposed approach?\n2. How sensitive is the method to the results of VFM? How well can the refine state correct possible errors by VLpart and SAM?\n3. How does the computational cost (training & inference) compare to existing CAM-based methods?" 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Clear writing and organization.\n- Well-motivated technical approach with clear problem formulation.\n- This paper propose a novel approach that uses visual foundation models and part-level semantic priors for WSAG, unleashing the power of these models for affordance learning.\n- Using human occlusion cues for label refinement, which is an innovative insight.\n- Comprehensive experimental validation and thoughtful analysis of limitations in existing methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the task of weakly supervised affordance grounding (WSAG), where the goal is to identify affordance regions on objects using only image-level labels and human-object interaction images. \nThe key contributions include:\n- A novel pseudo-supervised training framework and pipeline that leverages visual foundation models to generate affordance heatmaps, mapping affordance classes to object parts.\n- Three key enhancements to improve performance:\n - Label refinement using interaction cues\n - Fine-grained object feature alignment with exocentric images\n - Reasoning module for better generalization\n- Extensive experiments demonstrating significant performance improvements over existing methods" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Could benefit from more analysis of failure cases.\n- The label refinement stage using human occlusion cues may be problematic when interactions are ambiguous or when multiple affordances exist.\n- The mapping from affordance to part names is ad-hoc and manually crafted, which limits the scalability to new affordance types and more complex objects." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Aligning the features of an object from different views is a commonly used strategy for feature learning. How is this strategy related to pseudo label generation and refinement.\n2. Some designs need more detailed ablation studies. E.g., how does the proposed fine-grained feature alignment process with SAM perform when compared with the previous work aligning the features directly. Is there any significant performance difference?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Training affordance grounding models with object labels is an interesting question.\n2. Using off-the-shelf foundation models to generate affordance label is an interesting idea.\n3. Experiments show promising results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper propose a weakly supervised affordance grounding framework. It uses off-the-shelf foundation models to generate pseudo labels of object parts. To further improve the performance, a label refining strategy, a fine-grained feature alignment process, and a lightweight reasoning module are introduced. Experiments show promising results." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. As shown in the ablation study table 2, the improvements of using all these three modules look marginal over using one module. It seems that the effectiveness of the three components are not significant.\n2. In section 3.4, the authors propose to align the features of exo- and egocentric images after SAM segmentation while the existing methods directly align the features of the two images. However, there is no solid experiments to show the effectiveness of this design.\n3. The framework refines the affordance labels with the need of the corresponding exocentric image which may not be available sometimes." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a pseudo-label based method for weakly supervised affordance grounding, utilizing the semantic priors of vision foundation models." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024weaklysupervised,\ntitle={Weakly-Supervised Affordance Grounding Guided by Part-Level Semantic Priors},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0823rvTIhs},\nnote={under review}\n}" }, "abstract": { "value": "In this work, we focus on the task of weakly supervised affordance grounding, where a model is trained to identify affordance regions on objects using human-object interaction images and egocentric object images without dense labels. \nPrevious works are mostly built upon class activation maps, which are effective for semantic segmentation but may not be suitable for locating actions and functions. Leveraging recent advanced foundation models, we develop a supervised training pipeline based on pseudo labels. 
The pseudo labels are generated from an off-the-shelf part segmentation model, guided by a mapping from affordance to part names.\nFurthermore, we introduce three key enhancements to the baseline model: a label refining stage, a fine-grained feature alignment process, and a lightweight reasoning module. These techniques harness the semantic knowledge of static objects embedded in off-the-shelf foundation models to improve affordance learning, effectively bridging the gap between objects and actions.\nExtensive experiments demonstrate that the performance of the proposed model has achieved a breakthrough improvement over existing methods." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "weakly supervised affordance grounding", "foundation model", "pseudo label" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/8e86da678f19c14a55c806dd8360c86e09890140.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/6d4a8cc6f8e2fdea56ed70c51ffccd79c27af4ac.zip" }, "title": { "value": "Weakly-Supervised Affordance Grounding Guided by Part-Level Semantic Priors" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
08FCLXDY3S
Augmentation-Driven Metric for Balancing Preservation and Modification in Text-Guided Image Editing
main
Active
evaluation metric;text-guided image editing;multi-modal representation
other topics in machine learning (i.e., none of the above)
3;3;5;6
4;3;4;4
2;3;3;3
2;2;2;3
2;3;3;3
4.25
3.75
2.75
2.25
2.75
0.555556
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weakness.\n\n1. The key question is that we can use fusion of other existing metric without introducing a new one.\n2. The formulation of clipscore is not common." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The author shows the disadvantage of ClipScore, and try to design a new one. \n\n1. The key question is that we can use fusion of other existing metric without introducing a new one.\n2. The formulation of clipscore is not common." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the author proposes a new metric called AugClip, for Text-guided Image Editing." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Some notations are confusing. \nT sometimes is text, while T sometimes is the set. \n\n2. Overclaims in contribution-1 \"We are the first to point out CLIPScore’s reliability in text-guided image editing\"\nIn fact, most researchers recognize this point, and use a cocktail metric, like FID + Clipscore + SSIM + human evaluation. \n[a] Holistic Evaluation of Text-to-Image Models. NeurIPS. \n\n3. 
Conflicts in contribution-2 \nIn abstract, the author said using MLLM. \nIn introduction, the author claims LLM. \n\n4. Contribution-3 said \"demonstrates\" but there is no mathmatic proof. \n\n5. Figure 1 does not convince me. \nWe could simply use cosine(f_source, f_edit) to see the preservation. \nhttps://openaccess.thecvf.com/content/WACV2024/papers/Tanjim_Discovering_and_Mitigating_Biases_in_CLIP-Based_Image_Editing_WACV_2024_paper.pdf\n\n6. Figure 2 is similar to Figure 1. \nWe could simply use cosine(f_source, f_edit) to see the preservation. \n\n7. Eq.1 is not commonly-used. \nCould you show the reference? It does not make sense, since clip feature can not use plus or minus operation. \nMost cases I read is using cosine(f_modification text, f_editted image) \n\n8. One simple ablation is missing. \nHow about the weighted sum like cosine(f_modification text, f_editted image) + 0.5*cosine(f_source image, f_editted image) ?\ncosine(f_modification text, f_editted image) higher is better modification. \ncosine(f_source image, f_editted image) higher is better preservation. \nUsually, we will use the FID to indicate the preservation as well. \n\n9. I am confusing about Eq.3,4,5. \nEq 4,5 is about a , not v.\nEq 3 is about v, not a. \nBut the author said a can control v. \"the refined version of v is obtained through hyperplane optimization using as and at.\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. The main contribution of this paper is the new metric, augclip, for text-guided image editing. However, very few editing methods are tested with this new metric, which weakens the solidness of the metric. Considering the limited rebuttal period, I will not ask the authors to evaluate all editing methods in table 6 in the appendix. However, since the main benchmark illustrated in the paper is TEdBench[2], I suggest the authors to evaluate Forgedit[3] with augclip since they also released the complete editing results of TEdBench on github. You have to compare image editing methods with augclip to demonstrate its effectiveness instead of text-to-image models like stable diffusion itself in your paper. \n\n2. The clip score definition in equation 1 is different from the mainstream reference[1]. Where does this equation 1 come from? Why is it used instead of [1]?\n\n3. How long does it take to train the augclip metric on each benchmark? \n\n\nI am willing to raise my rating score if the authors could stress my concerns in the revised version of this paper. \n\nreferences:\n\n[1] Clipscore: A referencefree evaluation metric for image captioning. In EMNLP\n\n[2] Imagic: Text-based real image editing with diffusion models. In CVPR\n\n[3] Forgedit: Text-guided Image Editing via Learning and Forgetting. In arxiv" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The observation and visualization that clip score favors modifications instead of preservation is interesting.\n\n2. 
The idea to transfer the trained SVM from text space to image space and compute the minimum v from source image embedding to target prompt embedding is clever. \n\n3. the two-alternative forced choice testing and ground truth testing are reasonable." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposed a novel evaluation metric, augclip, for text-guided image editing. Motivated by the observation that clip score biases towards modification instead of preservation, the authors utilized GPT-4v to rephrase the source prompt and target prompt to extract essential visual attributes in the form of text prompts. Then the authors trained classification models to classify source prompts from target prompts. Since CLIP itself aligns the image space and text space, the classification model trained with source prompts and target promtps could also be utilized to compute the minimum vector v that transfer source image embedding to target text embedding. The augclip metric is then defined to be the cosine similarity of image embedding of the edited image and the vector sum of v and source image embedding. The proposed augclip demonstrate superior alignment with human evalutions than clip score and lpips score." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Since the main contribution of this paper is the augclip evaluation metric, the authors should compare with more than one image editing method on each benchmark. However, the authors only evaluated the results of one image editing method, the results of stable diffusion 1.5 (which is just a text to image base model without editing capability itself) and the original reference image, with the proposed augclip. For a new evaluation metric, this is far from enough. For example, the authors showed the reference images from TEdBench [2] multiple times in the paper, yet they only evaluate the scores of Imagic+Imagen. 
There are other related works on this benchmark, for example, Forgedit[3] open-sourced their implementation and released their results on TEdBench on github. \n\n2. Incorrect clip score definition. The clip score in this paper, shown in equation 1 in section 3.1, is different from the usual clipscore being used in text-guided image editing literature [1]. For example, in Imagic[2] and Forgedit[3], the clip score metric's definition follows [1]. \n\n3. Most editing methods in table 6 in the appendix never appear in the paper and section 4.3 is not written well thus is very confusing.\n\n\n\n\n[1] Clipscore: A referencefree evaluation metric for image captioning. In EMNLP\n\n[2] Imagic: Text-based real image editing with diffusion models. In CVPR\n\n[3] Forgedit: Text-guided Image Editing via Learning and Forgetting. In arxiv" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to **Weaknesses**." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The authors evaluate AugCLIP on multiple benchmarks, demonstrating that AugCLIP outperforms CLIPScore and LPIPS with various editing methods and datasets.\n\n- AugCLIP can evaluate both the modification and preservation of the editing images. 
Compared to CLIPScore, AugCLIP is a more comprehensive metric.\n\n- By leveraging GPT-4V, AugCLIP can evaluate more fine-grained differences between the ground truth image and the edited image. \n\n- The authors provide ablation studies to evaluate difference components of AugCLIP." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces AugCLIP, an evaluation metric designed for text-to-image editing tasks. AugCLIP aims to address limitations in CLIPScore, which can not evaluate the preservation of the original input image. The method leverages GPT-4V for detailed descriptions of the source and target images. By creating a \"modification vector\" based on source and target attributes, AugCLIP balances preservation and modification. The authors demonstrate that AugCLIP outperforms metrics such as LPIPS, CLIPScore on various datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- It seems that the authors overclaimed their contributions. For example, in Line 081-083, they mentioned that \"We are the first to point out CLIPScore’s reliability in text-guided image editing\". As far as I know, many papers have pointed out the limitations of CLIPScore. Almost all image editing methods leverage CLIP to evaluate the modification, and LPIPS/FID to evaluate the preservation. For example, [1,2] provides both CLIP and LPIPS to evaluate the editability–fidelity tradeoff.\n\n- The authors seem to confuse CLIP score with CLIP directional similarity score (*i.e.*, directional CLIP loss). From my understanding, the definition in Section 3.1 is more like CLIP directional similarity score rather than CLIP score. Please double check the definition of CLIP and CLIP similarity score in the following link: \n https://huggingface.co/docs/diffusers/conceptual/evaluation.\n\n- The experiments only involves metrics like LPIPS, CLIP. 
Please consider include the tradeoff between CLIP and 1-PIPS or FID.\n\n- Introducing GPT-4V introduces additional overhead, which is not evaluated in the related experiments.\n\n[1] Zhang, Zhixing, et al. \"Sine: Single image editing with text-to-image diffusion models.\" CVPR 2023. \n[2] Kawar, Bahjat, et al. \"Imagic: Text-based real image editing with diffusion models.\" CVPR 2023." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper introduces a novel evaluation metric for text-guided image editing that balances both preservation of the source image and modification toward the target text. It demonstrates remarkable improvement in alignment with human evaluators on diverse editing scenarios such as object, attribute, style alteration compared to all other existing metrics. Moreover, the metric is applicable to personalized generation, DreamBooth dataset, where the objective is to identify the source object in provided image, and generate into a completely novel context. This shows the flexibility of AugCLIP, that seamlessly apply to variety of editing directions. 
Notably, the metric excels in identifying minor differences between the source image and the edited image, showing superb ability in complex image editing scenarios such as MagicBrush. The major contributions are summarized as follows.\n- This paper is the first to point out CLIPScore’s reliability in text-guided image editing, as it frequently exhibits a bias towards modification rather than preservation and focuses on irrelevant regions.\n\n- This work proposes AugCLIP, a metric for image editing by automatically augmenting descriptions via LLM and estimating a balanced representation of preservation and modification, which takes into account the relative importance of each description.\n\n- In the experimental evaluations, AugCLIP demonstrates a significantly high correlation with human evaluations across various editing scenarios, even in complex applications where existing metrics struggle." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces AugCLIP, a novel evaluation metric for text-guided image editing that balances both preservation of the source image and modification toward the target text. By leveraging a multi-modal large language model to extract fine-grained visual attributes and applying a hyperplane-based optimization approach, AugCLIP estimates a representation of a well-edited image that closely aligns with human evaluators’ preferences. Extensive experiments across five benchmark datasets demonstrate AugCLIP’s superior alignment with human judgments compared to existing metrics, particularly in challenging editing tasks. Consequently, AugCLIP offers a significant advancement in the evaluation of textguided image editing, providing a more nuanced and reliable approach for assessing modifications while maintaining core image attributes. This metric holds promise for broader applications in personalized image editing and other vision-language tasks." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Overall this work makes an interesting and meaningful observation about the widely used CLIPScore metric, however there are still some concerns:\n- **Discussion on broader indicators.** This work highlights the problem of CLIPScore in the problem analysis in Section 3. Do other quantitative indicators such as FID and LPIPS have similar problems? Please give a more comprehensive analysis.\n- **Suitability for complex editing instructions or tasks.** There are many kinds of image editing tasks, including global editing such as style editing rather than just local editing. How does AugCLIP perform in this case?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024augmentationdriven,\ntitle={Augmentation-Driven Metric for Balancing Preservation and Modification in Text-Guided Image Editing},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=08FCLXDY3S},\nnote={under review}\n}" }, "abstract": { "value": "The development of vision-language and generative models has significantly advanced text-guided image editing, which seeks \\textit{preservation} of core elements in the source image while implementing \\textit{modifications} based on the target text. However, in the absence of evaluation metrics specifically tailored for text-guided image editing, existing metrics are limited in their ability to balance the consideration of both preservation and modification. Especially, our analysis reveals that CLIPScore, the most commonly used metric, tends to favor modification, resulting in inaccurate evaluations.\nTo address this problem, we propose \\texttt{AugCLIP}, a simple yet effective evaluation metric that balances preservation and modification. 
\n\\texttt{AugCLIP} begins by leveraging a multi-modal large language model (MLLM) to augment detailed descriptions that encapsulate visual attributes from the source image and the target text, enabling the incorporation of richer information. Then, \\texttt{AugCLIP} estimates the modification vector that transforms the source image to align with the target text with minimum alteration as a projection into the hyperplane that separates the source and target attributes. Additionally, we account for the relative importance of each attribute considering the interdependent relationships among visual attributes. Our extensive experiments on five benchmark datasets, encompassing a diverse range of editing scenarios, demonstrate that \\texttt{AugCLIP} aligns remarkably well with human evaluation standards compared to existing metrics. The code for evaluation will be open-sourced to contribute to the community." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "evaluation metric", "text-guided image editing", "multi-modal representation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/094577075cac891aa6b84345f9414fba7122b758.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/c75a814bb45dad0ea12727675816e434503a14d5.pdf" }, "title": { "value": "Augmentation-Driven Metric for Balancing Preservation and Modification in Text-Guided Image Editing" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
09FiNmvNMw
Divide and Translate: Compositional First-Order Logic Translation and Verification for Complex Logical Reasoning
main
Active
Logical Reasoning;Large Language Models;Neurosymbolic Approaches;Semantic Decomposition;Formal Language Verification
neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)
5;5;5
4;3;4
2;3;3
3;2;2
2;1;2
5
3.666667
2.666667
2.333333
1.666667
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Is it possible to extend CLOVER to improve performance on tasks that involve reasoning with data formats beyond natural language, such as mathematical equations or visual reasoning tasks?\n2. Can the authors provide more insights into how CLOVER compares with CoT-based methods designed for improving implicit reasoning of LLMs?\n3. Why not test CLOVER on a wider range of language models to assess its generalizability?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper presents a new approach by breaking down compositional translation into smaller steps and combining it with verification for logical reasoning tasks, leading to improved performance in neurosymbolic translation.\n- The experimental results are robust using GPT4-o, showing improvements over other methods across multiple benchmarks.\n- The authors propose two SAT-based first-order logic verification algorithms for selecting a sample from LLMs' logical code generations." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces CLOVER, an approach designed to enhance the translation of natural language logical problems into logical code, thereby improving the performance of language models on logical reasoning benchmarks. CLOVER achieves this by compositional translation of natural language into first-order logic and verification of logical semantics. The method involves parsing natural language sentences into logical dependency structures, translating these into first-order logic formulas, and employing verification algorithms to ensure accuracy. The authors demonstrate CLOVER's effectiveness on seven logical reasoning benchmarks, showing it outperforms previous neurosymbolic approaches." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The approach is primarily applicable to problems that can be represented in a SAT solver, limiting its generalizability to other reasoning datasets, such as those involving mathematical equations or visual components, e.g., MATH dataset.\n- The core idea of breaking down tasks into subtasks and using multiple samples and tests (e.g., verification, self-reflection, deterministic tests) to select the best generation is not novel.\n- The paper lacks comparison with chain-of-thought (CoT) based methods designed to improve implicit reasoning of language models, as in \"reliable reasoning beyond natural language\". These methods help the model extract information that is implied but not directly stated by interleaving natural language comments with logical code, and can alleviate the translation bottlenecks identified.\n- The paper only reports results using one language model, making it unclear if the method would improve performance across different models and weakening the experimental results." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. line 115: \"To save computational cost, we compare each one of logically equivalent formulas\". You probably mean to \"compare each logically equivalent formula\". How can this save computational cost?\n\n2. Line 149: how to read this formula in natural language? \n\n3. What is the output for the sentence \"A barber shaves all who do not shave themselves.\"? \n\n4. How are \"Declarations\" created? \n\n5. How to decide a sentence not fit for your system? (or how to decide an unintended input sentence?)" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "It is reasonable to improve the translation quality by decomposing a complex sentence into several shorter sentences. Using SAT solvers certainly improve the quality." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Authors propose a novel method of using LLMs to translate natural language descriptions into a set of first-order logical forms. This novel method decomposes this challenging task into two steps. 
The first step is to translate a long and complex sentence into a number of short sentences, the second step is to translate each short sentence into simple first-order logical forms and the connections between/among these short sentences into corresponding logical connectors. Experiments on seven benchmark datasets greatly outperform current SOTA level." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Not all natural language sentences can be translated to first-order logic forms. Authors did not discuss what sentences cannot be translated. \n\nAuthors use symbolic SAT solver in evaluating and selecting correct first-order logical forms. This limits the method only for the case where SAT solvers work. \n\nTheoretically, the meaning of natural language is not logical formula. This work is valued within fixed benchmark datasets. \n \nThe formalism of the paper is not easy to read." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- I'm curious why the execution rate increases when using CLOVER. As I read the methods section, it looked like CLOVER primarily helps with execution accuracy, but I didn't see much about how it would help repair/fix/generate better code for the SAT solver. 
\n\n- It's reported that \"CLOVER’s errors are primarily caused by preprocessing and other errors, which takes 78.6% of the total errors\", do you have examples of this? Is this an error in the accumulation stage? I think the paper does a great job of explaining where Logic-LM fails and why CLOVER is needed, but I think expanding on CLOVER errors is just as important to show where researchers can look next.\n\n- How much of the performance gain seen in CLOVER is due to a higher execution rate (runnable code)? I think expanding on how the metrics in Table 2 are computed would be helpful. For example is `Execution Acc = (correct_and_executable_programs / all)` or `Execution Acc = (correct_and_executable_programs / executable_programs)`. The latter, I think, helps distinguish if you are generating better executable problems or if you are only improving the execution rate (which maybe there is a simple fix to Logic-LM to help it create better executable problems)?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The results are promising; the authors do a fantastic job of motivating their new algorithm CLOVER by showing common failures of previous methods like Logic-LM, then show that their method fixes many of these errors (leading to the performance boost reported in Table 1).\n\n- The method here is pretty novel. Breaking down sentences into atoms isn't too novel, but I haven't seen someone decode them all individually and progressively (tapping into the auto-regressive natural of LMs) to improve the performance of the translation. The verification algorithms seem pretty intuitive (although they are described complexly), but again, despite being intuitive, I think they are fairly novel as well." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "Introduces a new algorithm, CLOVER, for solving logic questions in natural language, specifically by addressing the challenges in parsing natural language into the correct first-order logic so that an SAT solver can determine the answer. To do this, the paper proposes translating the question into smaller pieces that accumulate based on how each logic unit in natural language relates to other logic units until the resulting sentence is the final sentence that needs to be tested. Each accumulation from the previous step, including the final sentence, is translated into first-order logic, then the paper introduces two novel verification methods that check if the translations are valid and if there are contradictions in the final translation. The paper shows that with this accumulation strategy with their two verifiers, their method can outperform all baselines (including Logic-LM, a baseline that similarly translates sentences into FOL for SAT solvers) on common logic datasets like AR-LSAT, FOLIO, and ProofWriter." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The ablations in Table 3 need to be explained more clearly and then discussed. What is \"Is Clover?\" and why is the simplest ablation (no clover, direct translation, no verification / essentially none of the new things introduced in this paper) outperforming Logic-LM on AR-LSAT by 10.9% already from Table 1? Does this mean that your direct translation prompt already improves over complex algorithms like Logic-LM? If so, this deflates the papers impact, so it should be addressed (it's possible I am missing something, but others will catch this too, so it's best to explain it away.)\n\n- I believe the paper would benefit greatly from expanding on the models being evaluated; right now, only GPT-4o and GPT-4o-mini are evaluated. 
Showing that CLOVER consistently outperforms the baseline methods across model classes would improve the impact of this work.\n\n- (minor point) There is no discussion of inference-time compute costs for CLOVER vs. the other baselines. I imagine the inference cost is significantly higher, but I am unsure how much. Is this negligible compared to Logic-LM? Is there a way to compare CLOVER with baselines that use the equivalent amount of compute during inference? I think much of this point could be explained away with a textual justification (i.e., this isn't possible, or the compute costs are nearly equivalent, etc.), but I do think it should be mentioned.\n\n- (minor point) Clarity in section 3 could be improved. I would use the example in Figure 2 to clearly define each variable mentioned in the text to help readers follow your algorithm. For instance, defining with x^prep, T, phi_k, the mapping NL(phi), etc., with values from Figure 2 would help readers follow significantly. This could also be done in Figure 2 if you mark which parts of it are which variables. The text gets very dense with variables that are derived from other variables quickly; having these concrete instantiations really helps." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce CLOVER, a neurosymbolic approach that enhances complex logical reasoning in large language models by compositional translation of natural language into first-order logic and verification of logical semantics." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024divide,\ntitle={Divide and Translate: Compositional First-Order Logic Translation and Verification for Complex Logical Reasoning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=09FiNmvNMw},\nnote={under review}\n}" }, "abstract": { "value": "Complex logical reasoning tasks require a long sequence of reasoning, which a large language model (LLM) with chain-of-thought prompting still falls short. To alleviate this issue, neurosymbolic approaches incorporate a symbolic solver. Specifically, an LLM only translates a natural language problem into a satisfiability (SAT) problem that consists of first-order logic formulas, and a sound symbolic solver returns a mathematically correct solution. However, we discover that LLMs have difficulties to capture complex logical semantics hidden in the natural language during translation. To resolve this limitation, we propose a Compositional First-Order Logic Translation. An LLM first parses a natural language sentence into newly defined logical dependency structures that consist of an atomic subsentence and its dependents, then sequentially translate the parsed subsentences. Since multiple logical dependency structures and sequential translations are possible for a single sentence, we also introduce two Verification algorithms to ensure more reliable results. We utilize an SAT solver to rigorously compare semantics of generated first-order logic formulas and select the most probable one. We evaluate the proposed method, dubbed CLOVER, on seven logical reasoning benchmarks and show that it outperforms the previous neurosymbolic approaches and achieves new state-of-the-art results." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Logical Reasoning", "Large Language Models", "Neurosymbolic Approaches", "Semantic Decomposition", "Formal Language Verification" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/34452baa1c927a1217586b4f505eb5345c199dae.pdf" }, "presentation": null, "primary_area": { "value": "neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Divide and Translate: Compositional First-Order Logic Translation and Verification for Complex Logical Reasoning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
09JVxsEZPf
Towards Comprehensive and Efficient Post Safety Alignment of Large Language Models via Safety Patching
main
Active
Post Safety Alignment;Large Language Models;Jailbreak Defense;Over-Safety Mitigation
alignment, fairness, safety, privacy, and societal considerations
3;5;5;6
3;3;4;4
2;3;2;3
2;2;2;3
2;2;3;3
4.75
3.5
2.5
2.25
2.5
0.688247
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How does the use of gradient ascent and descent for patch derivation differ from recent work in unlearning?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The problem addressed—post-hoc safety alignment—is important for ensuring that LLMs behave safely in real-world applications.\n2. The empirical evaluation and ablations are fairly comprehensive across different LLM backbones and benchmarks.\n3. The method shows some promise in balancing safety with utility preservation compared to existing baselines." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a method called SafePatching for post safety alignment (PSA) of large language models (LLMs). The authors claim that SafePatching addresses three PSA objectives: safety enhancement, over-safety mitigation, and utility preservation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed approach seems to be largely composed of a series of straightforward adaptations or incremental improvements on recent work. 
For instance, the use of gradient ascent and descent techniques for deriving safety and over-safety patches is largely an adaptation of existing machine unlearning methods described in the paper, rather than a truly novel contribution. The concept of patching the difference set of important parameters between safety and over-safety patches is perhaps the most novel aspect. However, it's still a relatively straightforward extension of existing ideas in parameter importance and model merging.\n2. While the proposed approach does demonstrate that it is the only one to improve safety, over-safety, and utility over the backbone, in many cases, it performs significantly worse than the baselines for a particular safety or over-safety benchmark. Moreover, the safety and over-safety improvements over the backbone model are quite marginal in some cases. This highlights that there is more work to be done in effectively controlling the balance between safety enhancement and over-safety mitigation than the approach in its current state." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Would you please address the concerns in weakness?" 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "•\tThe idea is straightforward.\n•\tThe experiments are extensive." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a post safety alignment method which merges two models post-trained on harmful data with gradient ascent and descent respectively. The post-trained and merged model preserves a balance on safety, over-safety mitigation, and utility preservation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "•\tThe paper lacks the comparision of external safeguards methods such as OpenChatKit and NeMo guardrails that are known to handle over-safety issues. Would these external safeguards methods also achieve the three objectives proposed in the paper?\n•\tThere are a few hyperparameters in equation 7&8, such as a, b, \\alpha, \\beta. How you set these parameters? In Table 3, merging methods like the task arithmetic and TIES-merging do not have big differences compared to the intersect patch. Would the benefit comes from your hyperparameter selection?" 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "Yes, Discrimination / bias / fairness concerns", "Yes, Privacy, security and safety" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* Given that only the AdvBench dataset was used to evaluate SAFEPATCHING, how does the method perform across other safety-related datasets? Could testing with a broader range of harmful data enhance our understanding of its transferability to diverse safety scenarios?\n* Since the authors did not specify whether they directly used the fine-tuned Longformer model from Wang et al. or performed additional fine-tuning, what impact might this setup have on the accuracy and reliability of the judgment model in this experiment?\n* Could a deeper explanation of these aspects clarify the novelty and rigor of the proposed approach in Section 3.3?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The paper proposes a new method named SAFEPATCHING to address the limitations of existing methods on post-safety alignment for LLMs, such as over-safety issues and high cost.\n* The paper presents experimental results and comparisons with state-of-the-art methods to demonstrate the effectiveness of SAFEPATCHING and uses multiple open-source datasets on safety, over-safety, and utility for a comprehensive evaluation. Besides, this paper has interesting findings on the distribution of the most important parameters for safety and over-safety, providing future research directions for the community." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel post-safety alignment (PSA) method, called SAFEPATCHING, which aims to address safety, over-safety, and utility issues in large language models (LLMs). In this paper, the authors develop a two-stage PSA framework, which applies distinct safety patches to the backbone LLM based on harmful data to improve safety and reduce over-safety, meanwhile, maintaining the utility capability of the LLM. The experiment shows that SAFEPATCHING achieves more effective and efficient PSA compared to baseline methods across four aligned LLMs" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Lack of justification in Sec. 3.3 controllable patching. The authors may want to highlight the novelty of their tool and the rigor of their method. Currently, it appears that the approach relies on the SNIP score proposed by Lee et al., as well as model merging methods by Yu et al. 
and Hui et al., without a thorough explanation of the unique contributions or advancements made in this work.\n* Although the authors conducted an excessive experiment to show the effectiveness of SAFEPATCHING, several concerns existed in the settings.\n * The study evaluates SAFEPATCHING using only a single harmful dataset, AdvBench, which may not adequately demonstrate the method's transferability across different safety scenarios. Given the extensive range of safety categories and perspectives, it's essential to assess whether a backbone LLM patched using AdvBench can maintain its effectiveness on other datasets representing diverse types of harmful content.\n * The authors did not specify how they fine-tuned the Longformer-based judger. Wang et al. used annotated data generated through human labor to fine-tune their Longformer model. It remains unclear whether the fine-tuned model from Wang et al.'s work was directly utilized in this experiment or if further adjustments were made. Clarification on this point would provide a better understanding of the model’s setup and any adaptations relevant to this study.\n\nYu, Le, et al. \"Language models are super mario: Absorbing abilities from homologous models as a free lunch.\" Forty-first International Conference on Machine Learning. 2024.\n \nHui, Tingfeng, et al. \"HFT: Half Fine-Tuning for Large Language Models.\" \n \nWang, Yuxia, et al. 
\"Do-not-answer: A dataset for evaluating safeguards in llms.\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "- Potential for Misuse and Safety Bypass\n\nThe SafePatching framework’s dual-patch approach is designed to mitigate over-safety, allowing the model to respond to benign prompts with sensitive keywords. However, this opens up a risk of misuse if bad actors attempt to exploit this flexibility to bypass safety mechanisms deliberately." }, "flag_for_ethics_review": { "value": [ "Yes, Privacy, security and safety", "Yes, Potentially harmful insights, methodologies and applications" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In the SafePatching framework, Eq (1) and Eq (2) are designed to achieve two opposing objective by applying gradient-based updates in opposite directions on the same harmful dataset. Could you please clarify and elaborate how they are implemented given a harmful dataset?\n\n\nIn SafePatching, what requirements should a harmful dataset fulfill? For example, are there specific expectations concerning its size, diversity, or other characteristics? Additionally, are these requirements realistic for SafePatching's application in real-world scenarios?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "+ The proposed approach is easy to understand, logical, and appears to be effective.\n\n+ It addresses a significant and timely problem.\n\n+ The paper is overall well-written.\n\n+ The unlearning and fine-tuning techniques used in SafePatch are not new, the originality comes from considering dual patching at the same time.\n\n+ The paper includes an extensive set of experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a SafePatching framework to improve the safety of large language models (LLMs) while maintaining their utility. The major contribution is two types of safety patches. \n- The safety enhancement patch utilizes gradient ascent on harmful data to train the model to avoid generating unsafe responses. It effectively helps the model \"unlearn\" unsafe behaviors by adjusting the model parameters to minimize the risk of producing harmful content. \n- The over-safety mitigation patch, developed through gradient descent, is designed to prevent the model from being overly cautious. It fine-tunes the model to ensure it does not overly restrict or reject benign inputs that might superficially appear sensitive or risky. \n\n The approach is tested across multiple LLMs, showing better performance in reducing harmful outputs, handling over-safety, and preserving utility compared to several existing methods." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Limited Novelty in Core Techniques\n\nWhile the dual-patching approach is innovative in combining safety enhancement with over-safety mitigation, the core methods (e.g., gradient ascent and descent on harmful data) rely heavily on existing unlearning and fine-tuning techniques.\n\n- Clarity on Practical Deployment\n\nThe paper would benefit from more actionable details regarding the real-world deployment of SafePatching, especially the requirements on the harmful data set.\n\n- Stability of SafePatching Approach\n\nSafePatching's dual-patch integration requires careful parameter tuning, especially with the two gradient-based patches potentially introducing conflicts within the model. The process of managing these interactions, although effective, may lack robustness or generalizability across different architectures or types of prompts." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024towards,\ntitle={Towards Comprehensive and Efficient Post Safety Alignment of Large Language Models via Safety Patching},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=09JVxsEZPf},\nnote={under review}\n}" }, "abstract": { "value": "Safety alignment of large language models (LLMs) has been gaining increasing attention. However, current safety-aligned LLMs suffer from the fragile and imbalanced safety mechanisms, which can still be induced to generate unsafe responses, exhibit over-safety by rejecting safe user inputs, and fail to preserve general utility after safety alignment. To this end, we propose a novel post safety alignment (PSA) method to address these inherent and emerging safety challenges, including safety enhancement, over-safety mitigation, and utility preservation. 
In specific, we introduce \\textsc{SafePatching}, a novel framework for comprehensive and efficient PSA, where two distinct safety patches are developed on the harmful data to enhance safety and mitigate over-safety concerns, and then seamlessly integrated into the target LLM backbone without compromising its utility. Extensive experiments on four representative aligned LLMs, including LLaMA-2/3, Gemma and Mistral, show that \\textsc{SafePatching} achieves a more comprehensive and efficient PSA than baseline methods. It even enhances the utility of the backbone, further optimizing the balance between being helpful and harmless in current aligned LLMs. Also, \\textsc{SafePatching} demonstrates its superiority in continual PSA scenarios. \\textcolor{red}{WARNING: This paper may contain content that is offensive and harmful.}" }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Post Safety Alignment", "Large Language Models", "Jailbreak Defense", "Over-Safety Mitigation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b28f0d1191a8411bdc4d03df6e9a2f113daa2a14.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/5eec85122a6ffebd9e8aacea99e1f803e3556094.zip" }, "title": { "value": "Towards Comprehensive and Efficient Post Safety Alignment of Large Language Models via Safety Patching" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
09LEjbLcZW
AutoKaggle: A Multi-Agent Framework for Autonomous Data Science Competitions
main
Active
large language models;language agents;multi-agent
foundation or frontier models, including LLMs
5;5;5
4;4;3
2;2;3
3;2;2
3;3;3
5
3.666667
2.333333
2.333333
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* Did you do any finetuning over the used models, notably LLMs or are you using frozen models ?\n* Why cannot you compare to any existing baselines ?\n* Have you optimized the creation of your pipeline using these 5 kaggle competitions, or have you left out some of them, to evaluate on competitions you did not know at design time ?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "**Interesting problem.** With the LLMs (+RAG) becoming mature, the open source study of their integration into broader tools that can directly be applied to data science tasks, is the natural next step.\n\n**Overall good presentation.** Even if some details are lacking to grasp the authors' exact contribution (notably in the figures), the overall presentation clearly demonstrates the problem and the approach set up to tackle it. \n\n**Interesting metrics and ablation studies.**" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces AutoKaggle, a pipeline to automatically solve Kaggle Competitions. The authors use 5 subparts in a row: a reader, a planner, a developer, a reviewer, and a summarizer. 
They use LLMs with RAG to develop code-based solutions, with code running, units tests. They evaluate their method on 5 Kaggle competition benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Lacking evaluation.** The evaluation is lacking comparison to existing AutoML baselines (*e.g. [1]) or explanations on why the authors are not comparing their method to any existing solution. If running such comparison is not possible at all, then the authors should provide explanations on why this is not feasible. \nWhile detailed reports are provided on their methods and the different components, as this works apply existing techniques, its evaluation is its core contribution. \nThe authors should report (at least) the standard deviation, but e.g. violin plots to compare AutoKaggle's results of other kaggle competitors could help clearly situate where this automatic pipeline stands.\n\n**Evaluation on a (previously) unknown dataset.** It seems that AutoKaggle has been designed to solve these datasets, so one cannot evaluate how much this method would transfer to another, previously unknown dataset.\nIt would be nice to provide the reader with how much out of the box your method is, maybe with a user study. It seems like its your core contribution, so having independent people trying AutoKaggle and commenting on how easy the setup and interaction is on a left out dataset would help people looking for such solutions.\n\n**Figure 2 could be improved.** The figure could be split to separate the overall pipeline from details on some of its components. Most importantly, what part is using an LLM, what part is using a human expert ? 
This figure represents 70% of what the reader is looking for, it should provide first the overall intuition, and then enough details on specific core components that you want to highlight.\n\n**You related work section is actually a background section.**\nYour current related work covers some domains that are integrated within AutoKaggle. It thus feels more like a related work of your background section (what AutoKaggle builds upon). Is there any *e.g.* AutoML method that you can compare to ? Any method that addresses the same issue ?\n\n\n[1] https://github.com/automl/CAAFE" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the questions in Weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- AutoKaggle introduces a tailored phase-based workflow with multi-agent collaboration specifically designed for data science competitions. 
The system’s demonstrated high average completion rate and competitive ranking in Kaggle highlight its effectiveness, particularly in tabular classification and regression tasks, showing its strength in handling structured data challenges.\n\n- AutoKaggle empowers the Developer agent to perform iterative debugging and unit testing, bolstering the robustness of code generation. Additionally, the integration of a comprehensive machine learning tools library improves the system's efficiency and accuracy, making it better suited for tackling complex Kaggle competitions" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents AutoKaggle, a multi-agent framework specifically designed to handle the complexities of Kaggle data science competitions. The framework organizes the competition workflow into six distinct phases—background understanding, exploratory data analysis, data cleaning, in-depth exploratory analysis, feature engineering, and model development and validation—allowing agents to work systematically through each stage. Key agents, including Reader, Planner, Developer, Reviewer, and Summarizer, collaborate within this structure, with iterative debugging and unit testing to ensure robustness and accuracy in code generation. AutoKaggle integrates a machine learning tools library to streamline tasks, enhance code reliability, and provide users with educational insights through comprehensive reports at each phase. Evaluated across multiple Kaggle competitions, the framework achieved an average completion rate of 83.8% and ranked in the top 42.8% in Kaggle." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Limited novelty. While the paper addresses data science problem-solving using LLM-based agents, it lacks a clear description of the specific challenges it intends to solve that existing methods have struggled with. 
Extending from a single-agent to a multi-agent system is insufficiently justified in this field, as the necessity and performance gains of such an approach are not clearly demonstrated. Existing works, as mentioned in the introduction, have also tackled similar problems with LLM-based agents, questioning the incremental contribution of AutoKaggle.\n\n- Multi-agent system design. The multi-agent system, including agents like Reader, Planner, Developer, Reviewer, and Summarizer, is insufficiently explained in terms of its collaborative structure. It is unclear whether these agents operate in an assembly-line fashion or if they engage collectively in each phase under the \"Cooperative Engagement\" label in Figure 1. Further clarification on their integration and interdependence within each workflow phase is needed.\n\n- Role clarity of Planner and Summarizer. Given AutoKaggle’s sequential, phase-based workflow, the necessity of a Planner agent is ambiguous. Can you quantify the contribution (such as on completion rates or error reduction) of this Planner agent in your system? Similarly, the Summarizer’s role in contributing to critical performance metrics such as completion rate or Best Normalized Performance Score, is not explicitly justified, leaving its impact on performance uncertain.\n\n- Unit Test and Debugging. Dose the Developer agent generate dataset-specific unit tests that align with each unique code snippet or not? How the Developer agent adjusts unit tests based on code variations to ensure logical consistency and accuracy across different tasks? \n\n- Lines 275-276 mention the importance of detecting logical errors in code, yet the method for achieving this is underexplored. Can you explain more details about detecting the logical error? More detail is needed on how logical errors are detected and avoided, as conducting exploratory data analysis or statistical checks after data cleaning or feature engineering alone may be insufficient. 
\n\n- Table 2 illustrates the system's performance across different debugging attempts (DT), showing how increased debugging impacts metrics like Completion Rate (CR) and Comprehensive Score (CS). The data indicate that both CR and CS improve as DT rises, reflecting enhanced task completion and accuracy with more debugging opportunities. What the 'performance plateaus' mean in line 524-525?\n\n- The paper does not provide information on the cost of running AutoKaggle, which is essential for evaluating its performance and practical applicability. It's benifit to provide cost and total runtime to understand the performance. \n\n- The chosen baselines are not entirely convincing. Recent similar works, AIDE[1] and MLE-Agent[2] have shown remarkable capability in Kaggle competition settings. A comparative analysis with these recent works, particularly focusing on AutoKaggle’s unique advantages in effectiveness, efficiency, or other performance metrics, would highlight its distinct contributions to the field.\n\n- A broader evaluation across various task types such as time series prediction, image classification, and text classification, are necessary, as these are critical and challenging categories in Kaggle competitions. The current experiments focus primarily on tabular datasets, leaving it unclear whether AutoKaggle is capable of handling more complex, domain-specific tasks. Can AutoKaggle complete such tasks? \n\n- What is the requirement of the LLM? 
Can AutoKaggle works well with gpt-3.5 or other open-sourced models?\n\n\n[1] AIDE: the Machine Learning Engineer Agent(https://github.com/WecoAI/aideml)\n\n[2] MLE-Agent: Your intelligent companion for seamless AI engineering and research (https://github.com/MLSysOps/MLE-agent)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "~L140, you say that CoT improves reasoning at the expense of introducing hallucinations. Is there any evidence that CoT makes models any more or less likely to hallucinate?\n\n~L141, you say that the ReAct paradigm addresses hallucinations - that's not my understanding of what ReAct does or how it works, my understanding is that it combines thoughts and actions, yes, but that this has nothing to do with hallucinations or refining outputs.\n\n~L360: What is the difference between \"Success - Non-compliant\" and \"Success - Compliant\"?\n\n~L403: What's the justification / motivation for the complex / compound \"Comprehensive Score\"? How does it compare to other measures, what specifically does it achieve or avoid?\n\n~L431: could you say more about this \"strong baseline\" - I don't understand its construction.\n\nIf adding FE tools drops performance because FE adds too much complexity, then why does \"All tools\" (which presumably includes FE tools) recover this performance?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "A system which can score 43% on Kaggle leaderboards is a significant milestone on the path to automated coding and datascience. Additionally, since many challenges which such a system would face would also arise in more general task-completion (e.g. long-term planning, establishing coherency, managing context, preventing execution from looping) and so would transfer to improve AI agents in general.\n\nGreat collection of Classic and Recent challenges, and baselines seem reasonable (though see my Q about the Strong Baseline).\n\nIt's helpful to have this variety of scores (though see my Q about CS).\n\nArchitecture is clearly laid out, and the paper is overall very easy to read.\n\nClear exploration and explanation of the underlyring readon why the feature-engineering tools reduce the framework's score (many features, leading to more complexity than the agents can handle)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a scaffolding framework which uses LLMs to create a multi-agent system, used to attempt Kaggle problems. They use a \"phase-based\" multi-agent approach, together with a library of hand-crafted ML tools, and extensive hand-crafted unit-tests tailored to the Kaggle problems.\n\nApplying this framework to 8 Kaggle problems (4 pre-GPT-4 training cut-off, 4 afterwards), they achieve a significant solve rate, and an average of 42% on the Kaggle leaderboard.\n\nThe paper also explores ablation of various modules (various tools, and the unit-testing module)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Whenever CoT is used as an interpretability tool, I think it's always wise to mention unfaithfulness e.g. 
https://arxiv.org/abs/2305.04388\n\nThere are two places where a long list is hard to read:\n#1 ~L78: AutoKaggle integrates a comprehensive machine learning tools library, covering three core toolsets: data cleaning, feature engineering, and model building, validation, and prediction.\n\n#2 ~L186: The data science process is divided into six key stages: understanding the\nbackground, preliminary exploratory data analysis, data cleaning, in-depth exploratory data anal-\nysis, feature engineering, and model building, validation, and prediction\n\nPerhaps \"model-building, -validation, and -prediction\" would be easier to read.\n\n~L146: I'm surprised not to see mentioned what seems to me to be the main thing underlying the motivation of multi-agent systems: finite context length, requiring summarisation and specialisation.\n\nIt's not clear how much of the headline 43% on the leaderboard is down to the skill of the human-in-the-loop, which severely undermines the claim. Without a comparison to how well the human takes unassisted (in terms of success rate or time taken), or to how well AutoKaggle performs without HITL, it's impossible to reliably state how effective the framework is.\n\nUnspecified HITL also undermines the various claims of a \"fully automated framework\" (e.g. L175)\n\nNot much detail on these unit tests. Who writes them? What's the coverage like? Are there any guarantees? If (as I suspect) the \"meticulously designed\" unit tests are written by humans, then we have a similar situation as with the unspecified human-in-the-loop: the framework is not \"fully automated\", and it's impossible to rigorously determine how much effect the human hand-holding has on the framework's suggess. This should, at minimum, be clearly, explicitly and boldly acknowledged.\n\nAdditionally, it is unclear to me how much of the ML-tools library was developed alongside particular Kaggle Competition attempts. 
If the tools were developed on a case-by-case basis, to address hurdles found in the challenge, then there is significant data leakage from the evaluation dataset to the framework, leading to overfitting to the competitions chosen during development, and much of the headline 43% comes from tools handcrafted by human developers on a case-by-case basis. For a fair validation of how well this framework performs in \"fully automated\" mode, the library would need to be \"frozen\" while the framework was tested on a held-out set of Kaggle Competitions.\n\nVery minor point: ~L350, I agree that there is a risk of data leakage for competitions from before Oct '23, however to say that GPT-4o's training data includes Classic Kaggle is an assumption: better to say simply that there is a risk of data leakage.\n\nIf you're considering data leakage, it would be worth flagging that the 42% includes Classic problems: using only the newer problems, performance is slightly below human average." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose AutoKaggleMaster, a robust and user-friendly framework that solves Kaggle problems through a multi-agent collaborative system." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024autokaggle,\ntitle={AutoKaggle: A Multi-Agent Framework for Autonomous Data Science Competitions},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=09LEjbLcZW},\nnote={under review}\n}" }, "abstract": { "value": "Data science competitions on Kaggle, which represent real-world programming challenges, require sophisticated problem-solving approaches. While LLM-based agents demonstrate potential in various fields, their application to data science tasks often falls short due to difficulties in adapting to data changes in multi-stage reasoning and the need for precise reasoning. 
To address this, we propose AutoKaggle, a robust and user-centric framework that solves Kaggle problems through a collaborative multi-agent cooperative system. AutoKaggle implements an iterative development process that combines code interpretation, debugging, and comprehensive unit testing covering over 30 tests, ensuring code correctness and quality through LLM-based evaluation. It prioritizes user experience by generating detailed reports that elucidate feature engineering processes, data transformations, model selection criteria, and the reasoning behind each decision. It offers customizable workflows, allowing users to intervene and modify each stage of the process, thus combining the advantages of automated intelligence with human expertise. Additionally, we build a universal data science tool library, including carefully verified functions for data cleaning, feature engineering, and modeling, which form the foundation of this solution. We evaluate the framework on 8 carefully selected Kaggle competitions, achieve 83.8\\% in average completion rate and 42.8\\% average rank in Kaggle." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "large language models", "language agents", "multi-agent" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/06db613e04c157c36722c2631ca958f7f587be75.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "AutoKaggle: A Multi-Agent Framework for Autonomous Data Science Competitions" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
09TI1yUo9K
Noise is More Than Just Interference: Information Infusion Networks for Anomaly Detection
main
Active
Self-supervised learning;Anomaly detection
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;5;5;5
5;4;5;4
2;2;2;1
2;2;2;3
2;1;2;3
4.5
4.5
1.75
2.25
2
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In Eq. 7, L_richness. It would be useful to give dimensions of X and F. Possibly also to rewrite the equation to make the way this is calculated easier to understand.\n\nIn Sec 3.3. - How is noise Z added? Is it sampled once and used for all blocks?\n\nIn Eq. 11, which features are max(s) and min(s) calculated from in the normalization?\n\nWhy no M3DM comparison or comparison on MVTec3D or on Real3DAD that have been published and are more widely cited?\n\nThe BTF method achieves an extremely low AUROC score on the ICD dataset showing a strong correlation between the anomaly score and the normality of the example which may be interesting and should be commented on given that the dataset is one of the contributions.\n\nThe discussion of the experimetnal results could be expanded.\n\nWhy are the results on the ICD dataset relatively low in terms of the AUROC scores?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- Interesting method that aims to improve existing handcrafted features which seems novel.\n- The proposed method achieves state-of-the-art results on the AnomalyShapeNet dataset and on the newly proposed ICD dataset.\n- Most sections of the paper are well written and easy to follow despite the proposed method being constructed of several components." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a novel method that uses a noise prior to learn to improve the features of a handcrafted descriptor FPFH. The FPFH features are reformulated through a series of Information Gain blocks that attempt to extract useful information from noised FPFH features thus decoupling the noise from the useful information contained within the features. The extracted features are then used to create a memory bank which is used at inference for anomaly score estimation. A packet downsampling process is also proposed, which is a Mahalanobis distance-based greedy coreset sampling mechanism that better samples features in cases where the observed class is composed of several subclasses.\n\nThe authors also propose a new dataset, ICD, where each class is composed of several subclasses providing a unique challenge for 3D anomaly detection methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Some implementation details, such as how noise is injected and sampled during the pretraining phase, could be included as it would improve clarity for the reader. Some implementation details are included in the supplementary but could be moved to the main paper.\n\nThe results of the comparison of the proposed method to related works could be discussed in more detail. 
Currently the results on AnomalyShapeNet and ICD are only briefly listed in Section 4.3, however no discussion of the results is given.\n\nOn the ICD dataset the current SOTA on AnomalyShapeNet is not evaluated (R3D-AD) and the second best method is a vanilla PatchCore using FPFH features which generally does not achieve SOTA results on 3D anomaly detection benchmarks. Given that the ICD dataset is one of the claimed contributions of this paper the evaluation should be more thorough and the discussion of the results more detailed.\n\nThe ablation study is done on the newly proposed ICD where the performance is very low (0.6 AUC). This makes it difficult to really evaluate the components of the method since most anomalies are already missed and the difference between most experiments is less than 1% AUROC.\n\nOverall I believe the experimental section is the most lacking. There is a lack of discussion of the results on both the AnomalyShapeNet and the ICD dataset. Additionally, the evaluation on the ICD dataset could be more thorough. Methods that are included in the AnomalyShapeNet experiments are not included in the ICD experiments. The results are not properly discussed. Only image-level AUROC is used for the evaluation in Section 4.3 but in the ablation study (Section 4.4) other metrics are also used. The ablation study should also be done on AnomalyShapeNet to get a clearer picture of the impact of each design choice." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The authors seem to achieve better performance by stacking layers of IGB and increasing the number of MLPs within them. Is this performance improvement due to increased computational complexity?\n2. In Table 3, the results without using IP and IGB appear to be better than those with IP and two layers of IGB. Please explain the effectiveness of IGB and IP.\n3. The comparison methods in Table 1 differ from those in Table 2. It seems that the experimental results of CPMF, IMRNet, and R3D-AD on the ICD dataset are missing in Table 2. It is recommended that the authors include these results to demonstrate the reliability of the experiments.\n4. The proposed dataset does not seem to have a significant advantage in terms of defect types and quantity. It appears to be a selection of a few subclasses from each category in the ModelNet dataset." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The authors propose an Information Gain Block-based Anomaly Detection method to address the issue of high intra-class variance. They introduce Rotation-Invariant Farthest Point Sampling and an Information Perfusion module composed of Information Gain Blocks. The authors incorporate noise into 3D anomaly detection to provide more distinctive feature information. Additionally, they construct the Intra-Class Diversity (ICD) 3D anomaly detection dataset" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose an Information Gain Block-based Anomaly Detection method to address the issue of high intra-class variance. 
They introduce Rotation-Invariant Farthest Point Sampling and an Information Perfusion module composed of Information Gain Blocks. The authors incorporate noise into 3D anomaly detection to provide more distinctive feature information. Additionally, they construct the Intra-Class Diversity (ICD) 3D anomaly detection dataset. The effectiveness of the method is validated on the constructed dataset and the ShapeNet dataset." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "See questions." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. The definition of 'prior noise' is missing. The authors mention 'prior noise' in the abstract and introduction but do not provide a definition, nor is it described in the methods section.\n\n2. How does the proposed method tackle the challenge of high intra-class variance?\n\n3. In Page-2 Line-77, what is the link between extracting valuable information and high intra-class variance?\n\n4. In Table 3, the unit for \"Time Cost\" needs to be provided, whether it is seconds or milliseconds.\n\n5. Are there any hyperparameters in the proposed method? Are they sensitive?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.This manuscript proposes IGB, IGB-AD, RIFPS, IP, and PD module to enhance 3D anomaly detection.\n\n2.This manuscript introduces the ICD dataset for 3D anomaly detection. Different from existing datasets, it includes multiple sub-classes.\n\n3.The method proposed in the manuscript achieves the state-of-the-arts performance on one public dataset and the proposed dataset" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This manuscript claims that most existing 3D anomaly detection methods require the usage of registration to preprocess point clouds and exhibit high intra-class variance. To this end, it proposes IGB, IGB-AD, RIFPS, IP, and PD module to enhance 3D anomaly detection and alleviate these two challenges. Furthermore, it develops an Intra-Class Diversity (ICD) 3D dataset with multiple subclasses. Moreover, the proposed method achieves the state-of-the-arts performance on one public dataset and the proposed dataset." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.The citation format is incorrect, with many references needing to be placed in parentheses. The authors should carefully read Section 4.1 of the Formatting Instructions for ICLR 2025 Conference Submissions.\n\n2.Lack of details on ICD datasets. Since the ICD dataset is the second contribution, the motivation for its creation should be described in the introduction section. \n\n3.The Introduction section could be better articulated. The author spends most of the Introduction describing the current issues with 3D anomaly detection but does not explain how their proposed method effectively addresses these challenges. 
Deeper insights need to be provided.\n\n4.In Page-2 Line-68, R3D-AD reconstructs normal samples from pseudo abnormal point clouds using a Diffusion model and cannot be categorized as a distillation method.\n\n5.Lack of experiments. 1) the ablation and comparison experiment on the proposed Rotation-Invariant Farthest Point Sampling (RFPS) 2)The performance of the proposed method on the Real3D-AD dataset." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See the weakness." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper offers a comprehensive literature review." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes Information Gain Block-based Anomaly Detection (IGB-AD) for 3D anomaly detection to address the challenges of insufficient anomaly detection information and high intra-class variance. Overall, the writing is not clear, and the experimental results fail to demonstrate the superiority of the proposed method." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "it would be better to specify your title to include 3D anomaly detection or point cloud anomaly detection to be more specific.\n\nWhat is your definition of noise? The lack of definition makes the motivation hard for me to understand. like, \"Noise, as a prior source of information, consists of a combination of various types of data\", what is noise?\n\nWhy the teacher-student distillation networks are proposed to mitigate the effects of noise in Lines 65 and 67? I am not convinced by this claim. Like in 3DST, RD4AD, CDO, etc., is there any technique related to noise?\n\nthe description of the method is hard to understand as well. It would be better if you could improve the overview of your method a bit. Currently, I am not clear about your motivation for the framework, yet the relationships between the proposed components and the motivation are unclear.\n\nThe authors only conduct experiments on Anomaly-Shapenet and the established dataset. What about Real3D and MVTec 3D?\n\nWe can see in Table 1, that the proposed method can even perform worse than a simple baseline FPFH in some categories, which is confusing and fails to demonstrate the effectiveness of the proposed method.\n\nAlso, what about the point-level results? In Table 1 and Table 2, only object-level results are presented.\n\nThe ablation results in Table 3 fail to demonstrate the effectiveness of individual components since the variation is not significant enough. We can see that with only PD, the authors even achieve higher P-AUROC than some other variants like in rows 1, and 4 of Table 3." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024noise,\ntitle={Noise is More Than Just Interference: Information Infusion Networks for Anomaly Detection},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=09TI1yUo9K},\nnote={under review}\n}" }, "abstract": { "value": "3D anomaly detection is a crucial task in computer vision, aiming to identify anomalous points or regions from point cloud data. However, existing methods may encounter challenges when handling point clouds with high intra-class variance, especially for methods that rely on registration techniques. In this study, we propose a novel 3D anomaly detection method, termed Information Gain Block-based Anomaly Detection (IGB-AD), to address the challenges of insufficient anomaly detection information and high intra-class variance. To extract ordered features from 3D point clouds, the technique of Rotation-Invariant Farthest Point Sampling (RIFPS) is first introduced. Then, an Information Perfusion (IP) module composed of stacked Information Gain Blocks (IGB) is proposed to utilize prior noise to provide more distinguishing information for the features, where IGB is designed to utilize noise in a reverse-thinking manner to enhance anomaly detection. Finally, a Packet Downsampling (PD) technique is developed to preserve key information between multiple clusters to solve the complex downsampling situation. The main purpose of the framework is to utilize the effective information within prior noise to provide more detection criteria for anomaly detection. In addition, an Intra-Class Diversity (ICD) 3D dataset is constructed, which contains multiple categories with high class-variance. 
Experimental results show that the proposed IGB-AD method achieves the State-Of-The-Arts (SOTA) performance on the Anomaly ShapeNet dataset, with an P-AUROC of 81.5% and I-AUROC of 80.9%, and also gains the best performance on the ICD dataset, with an P-AUROC of 57.4% and I-AUROC of 60.2%. Our dataset will be released after acceptance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Self-supervised learning", "Anomaly detection" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/9ae64af9f3e13bb39bc76f276404a4bb1ef10d05.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Noise is More Than Just Interference: Information Infusion Networks for Anomaly Detection" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0A6f1b66pE
Unleashing the Power of Selective State Space Models in Vision-Language Models
main
Withdraw
Vision-Language Models; Mamba;
foundation or frontier models, including LLMs
Honghao Chen;Yibing Song;Shoufa Chen;Chongjian GE;Kaiqi Huang
~Honghao_Chen1;~Yibing_Song1;~Shoufa_Chen1;~Chongjian_GE1;~Kaiqi_Huang1
3;3;5;6;6
4;5;3;4;3
2;2;3;3;3
2;1;2;2;2
2;2;3;3;3
4.6
3.8
2.6
1.8
2.6
-0.669894
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "We thank the reviewers for their valuable comments and we will revise accordingly." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In the introduction, the authors mention that the proposed framework is also compatible with Transformer-based LLMs, but there seems no experiments on applying the proposed method on transformer LLMs?\n\n2. 
What is the Merge operator in the equation (8)?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The proposed method achieve competitive results on benchmarks like Open-ended VQA and challenge sets. It outperforms LLaVA-1.5 with less training time.\n\n2. The proposed method has good intuition on how to better utilize the Mamba's efficiency with good visualizations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper propose a new way to integrate the Mamba architecture into the multi-modal large language models (MLLM). The technical contribution include: 1. propose using visual long sequence to utilize Mamba's linear complexity. 2. design a cross-stitch scanning approach to extract and combine spatial and semantic features simultaneously. The proposed method outperforms LLaVA-1.5 with less training time and better inference efficiency, and achieve similar performance with model trained on larger dataset such as Qwen-VL." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed method is likely to be dependent on vision encoders. It would be more solid if the author could conduct additional experiments on encoders other than DINOv2 + SigLIP. Also, the author does not show how proposed method perform on single vision encoder MLLM.\n\n2. There is not enough ablations experiments on the scanning orders. For example, no comparison with only using Hv1." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see weakness above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The authors develop visual long sequences that enhance representation capabilities, ensuring more robust and detailed visual data processing.\n- The authors introduce an innovative cross-stitch scanning mechanism designed to improve the interaction between visual and linguistic data, optimizing vision-language alignment.\n- The authors present MambaVLM-a robust and streamlined MLLM framework. Their extensive testing across various benchmarks validates the effectiveness of their approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces MambaVLM, a novel framework that utilizes the Mamba model, a state-of-the-art selective structured state space model renowned for its linear computational complexity and its efficiency in managing long sequences. The authors enhance the Mamba model by incorporating visual long sequences and a cross-stitch scanning mechanism, specifically tailored to boost interaction and alignment between visual and linguistic data. 
Through extensive experiments and qualitative analyses, they establish MambaVLM not only as a powerful tool for MLLM tasks but also as a pioneering approach that sets a new benchmark for future research in the field." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The contributions are vague; it would be better to clearly summarize the contributions of this paper at the end of the Introduction. This article simply replaces the traditional MLLM with the Mamba model, and the proposed Stitch-Scan is merely a data augmentation stitching method.\n\n- The experiments are insufficient. The core argument of this article is: \"we first construct visual long sequences with multiple vision encoders, which not only enrich visual representations but also leverage the advantages of Mamba in handling long sequences. Notably, this design will not undermine the efficiency obviously, which is in stark contrast with the common cognition of Transformer-based MLLMs.\" Is there any experimental or theoretical support for this conclusion? How much is \"not undermining the efficiency obviously\" specifically? It is recommended that a row be added to Table 4 so that the visual tokens of MambaVLM and MobileLLaMA-2.7B are also consistent at 144, which would support the above point.\n\n- Formula 7 is expressed non-standardly; do not mix mathematical symbols with code.\n\n- In Formula 8, Hv = Merge(Hv1, Hv2, Hv3, Hv4), the Merge method is not explained in the text. What specific merging technique is used, just a simple concatenation?\n\n- In Table 1, the Qwen-VL model outperforms MambaVLM in performance on TextVQA and VQAv2 with a data scale of 665K. Typically in papers, bold numbers indicate the best results obtained by models, but this is not the case in your table. If the bold numbers have a special meaning, please explain this in the text. Additionally, the same issue occurs in Table 2." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See Weakness" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The framework is concise and clear, making the proposed approach easy to understand." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a customized version of the Mamba framework within multimodal large language models (MLLMs). This framework has three core components: a visual long sequence, a Mamba projector, and a Mamba LLM. Experimental results on various benchmarks suggest improved performance and speed compared to several existing methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1): Limited Novelty in Technical Contribution: While the paper proposes a \"visual long sequence\" as part of the framework, a significant body of literature already exists on augmenting visual features using ensembles of different visual encoders, as demonstrated in works such as [A-D]. 
The design of the Mamba projector, specifically its cross-stitch scanning scheme that concatenates four scanning paths, seems heuristic rather than theoretically grounded.\n\n2): Unclear Motivation for Mamba Projector: The Mamba projector, the primary technical contribution of this paper, has an unclear motivation. The 1x1 convolutional MLP layer can be treated as a full attention layer, suggesting that the Mamba projector is an approximation. Lines 250–253 argue that \"a simple MLP layer may not be able to accomplish sufficient vision-language alignment and interaction of different visual features. Therefore, we devise a lightweight mamba projector…\" However, this rationale does not sufficiently justify the addition of the Mamba projector.\n\n3): Unfair Experimental Comparisons: For instance, in Table 4, using a longer visual sequence generally increases latency. Models such as TinyLLaVA and MobileVLMv2 should be substituted with the Mamba LLM. In Table 2, MambaVLM shows superior performance, largely attributed to encoder ensembling—a common approach in the literature.\n\n4): Presentation Quality: The paper’s overall clarity and presentation could benefit from further refinement.\n\n\n\nReferences:\n\n[A]: BRAVE: Broadening the Visual Encoding of Vision-Language Models, ArXiv.\n\n[B]: Eyes Wide Shut? Exploring the Visual Shortcomings of Multimodal LLMs, CVPR 2024.\n\n[C]: Eagle: Exploring The Design Space for Multimodal LLMs with Mixture of Encoders, ArXiv.\n\n[D]: Law of Vision Representation in MLLMs, ArXiv." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is easy to understand with clear illustrations on the proposed methods.\n2. According to the experiments, MambaVLM achieves overall better performance compared to previous VLMs, such as Qwen-VL, LLaVA-1.5, and Cobra." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a new variant of vision-language model (VLM) called MambaVLM, which introduces multiple improvements to previous VLM with Mamba method, Cobra. Specifically, the paper proposes to concat the visual features from DINOv2 and SigLIP by sequence axis instead of the channel-axis in Cobra, followed by a new Mamba-based projector. Performance is validated on various VLM benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The novelty is limited with the following reasons: (1) The model is based on Cobra, with minor changes on the concatenation of visual features and projector. (2) The scan directions are from VMamba, only the stitch-scan is novel.\n\n2. Although the sequence-level concatenation improves the performance, it poses a great concern on the efficiency of the model, but the authors did not provide the inference speed, computational cost, and memory cost comparisons. Though the Mamba has linear computational complexity, longer sequence indeed increases the FLOPs and memory consuption, and the heavy projector also introduces additional costs. 
As a result, directly compare the model with existing methods such as Cobra without comparing the efficiency is **unfair**.\n\n3. In Figure 1, directly comparing LLaVA-1.5 with MambaVLM to demonstrate the effective of Mamba and the superiority on training time is unfair, as MambaVLM uses better DINOv2-SigLIP encoder.\n\n4. In lines 215~235, \"regardless of how many channels ... loss of visual information\" is overstated, lacking precise theoretical evidence to support the claims. Bottleneck-structures are widely used in networks such as ResNet, and according to information bottleneck principle, it is no clear evidence to state that the compression of channels will definitely lose the valuable information. Please reword.\n\n5. In Table 1, some results (62.6, 76.3) of MambaVLM is not the best and should not be bolded. Please correct them." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the section on weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The proposed method in the paper performs very well, achieving better performance than LLaVA 1.5 with only half the training time.\n2. 
The approach is ingenious, using Mamba for long-context vision-language modeling is a promising avenue worth exploring.\n3. The paper is written with a clear structure." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "MambaVLM is a highly efficient multi-modal large language model framework that integrates Mamba’s linear complexity with a novel cross-stitch scanning approach to improve both visual information interaction and vision-language alignment. Achieving competitive benchmark results with only 0.66 million data points and 14 hours of training on a single A800 node, MambaVLM significantly outperforms LLaVA-1.5 and rivals the performance of Qwen-VL, demonstrating Mamba’s potential in enhancing MLLM efficiency and effectiveness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The performance comparison with the original LLaVA is somewhat unfair, as the method in the paper uses two visual encoders. It would be better if a version with only ViT-CLIP could be provided.\n2. The method description in the paper is unclear; perhaps I missed where it explains how Mamba-VLM + Vicuna is implemented. It seems that if Vicuna is used, only the Mamba projector is related to Mamba. Of course, I also understand that the performance of VLMs is highly dependent on the performance of the LLM, and Mamba as an LLM is still relatively weak." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@misc{\nchen2024unleashing,\ntitle={Unleashing the Power of Selective State Space Models in Vision-Language Models},\nauthor={Honghao Chen and Yibing Song and Shoufa Chen and Chongjian GE and Kaiqi Huang},\nyear={2024},\nurl={https://openreview.net/forum?id=0A6f1b66pE}\n}" }, "abstract": { "value": "While emerging multi-modal large language models (MLLM) have demonstrated impressive advances, the quadratic complexity of their Transformer-based LLMs (3B or larger) inevitably leads to considerable computational overhead. On the other hand, the recently proposed selective state space model (i.e., Mamba) enjoys both model capacity and computational efficiency, making it an ideal component to enhance MLLM's efficiency and performance. However, recent attempts to introduce Mamba into MLLMs simply replace their LLMs with Mamba, ignoring the unique characteristics of either side. We argue that such a naive combination cannot exhibit the potential of Mamba in MLLMs. In this paper, we delve into harnessing Mamba's unique properties, and propose tailored designs from both multi-modal input and architectural perspectives to unleash its true power. First, we fully utilize Mamba's linear complexity to construct visual long sequences for a thorough perception at a minor efficiency burden. To integrate the scanning mechanism with the built visual long sequence, we devise a novel cross-stitch scanning approach to capture and fuse spatial and semantic properties simultaneously, enhancing the interaction of visual information and the vision-language alignment. Built upon these designs, we propose MambaVLM, a simple yet effective MLLM framework that exhibits highly competitive results across multiple benchmarks. Moreover, our framework is also compatible with Transformer-based LLMs (e.g., Vicuna), demonstrating remarkable training and inference efficiency. 
Notably, with only 0.66M data and 14 hours training on a single A800 node, our MambaVLM outperforms LLaVA-1.5 by significant margins and performs on par or even better than the 1.4B data trained Qwen-VL. The appealing results from both effectiveness and efficiency aspects indicate the promising prospects of Mamba in MLLMs." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Honghao_Chen1", "~Yibing_Song1", "~Shoufa_Chen1", "~Chongjian_GE1", "~Kaiqi_Huang1" ] }, "authors": { "value": [ "Honghao Chen", "Yibing Song", "Shoufa Chen", "Chongjian GE", "Kaiqi Huang" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Vision-Language Models; Mamba;" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "chen|unleashing_the_power_of_selective_state_space_models_in_visionlanguage_models" }, "pdf": { "value": "/pdf/1183a88cc6949631a83ff61a73451d323b4592a6.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Unleashing the Power of Selective State Space Models in Vision-Language Models" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0AHkdAtFW8
Sum-of-Squares Programming for Ma-Trudinger-Wang Regularity of Optimal Transport Maps
main
Active
Optimal transport;sum-of-squares programming;Ma-Trudinger-Wang tensor
optimization
5;5;6;6;6
4;2;2;4;2
3;3;3;3;3
3;1;3;3;3
2;1;3;3;3
5.6
2.8
3
2.6
2.4
-0.166667
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No ethics concerns" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. What are some other regularity verification methods? how does the SOS programming compare to them in terms of accuracy and efficiency?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper’s strength lies in its innovative application of Sum-of-Squares (SOS) programming to address the longstanding challenge of verifying the regularity of optimal transport (OT) maps through the Ma-Trudinger-Wang (MTW) tensor. SOS programming is a well-established tool in optimization and control, but this work extends it to OT regularity, opening new possibilities for computational verification of the MTW conditions in general cases where analytic approaches are intractable. The paper also showcases the practical efficacy of the approach by applying it to various cost functions, demonstrating its flexibility and adaptability across different scenarios." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a computational approach using Sum-of-Squares (SOS) programming to verify and approximate regions of regularity for optimal transport maps, specifically focusing on the Ma-Trudinger-Wang (MTW) tensor. 
Regularity of optimal transport maps is critical in many machine learning applications, and this regularity can be assured by the non-negativity of the MTW tensor. However, verifying this condition analytically for general cost functions is challenging. The authors propose using SOS programming to generate certificates of non-negativity for the MTW tensor across a broader range of cost functions, potentially providing computationally verified regions of regularity. Their method is applied to both verifying non-negativity conditions (the \"forward problem\") and to computing inner approximations of regularity regions (the \"inverse problem\") for several ground cost functions, demonstrating the effectiveness of SOS programming in this context. This computational framework contributes a systematic approach to certifying regularity in optimal transport, potentially facilitating its application in various machine learning tasks. The paper concludes by applying the proposed framework to several common examples in the literature." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "A missing key aspect in the paper is the time complexity analysis for the proposed framework. What's the computational efficiency of SOS programming in verifying regularity of the different OT problems? While the authors showcase the method’s application to specific examples and shared the wall-clock time, a time complexity discussion could be a good addition to the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "My main question pertains to how the authors see this work fitting within the broader optimal transport literature. In effect, though some results require regularity of the optimal transport map to hold, these results typically pertain to questions of statistical estimation. In these settings, population measures are estimated based on samples and so (i) absolute continuity cannot be verified a priori, (ii) upper and lower bounds for the density cannot be verified, and (iii) the support of the distributions are unknown. It is thus unclear to me how the content of the current paper fits within the previous context." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "To my knowledge, this is the first paper which explores the question of numerically verifying the MTW condition. The paper is overall written well, and the theoretical details look correct." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a sum-of-squares programming based approach to verifying the Ma-Trudinger-Wang (MTW) condition. Precisely, both the forward problem of identifying if a given cost function and domains satisfy the MTW condition and the inverse problem of finding the largest semialgebraic domain on which the MTW condition holds are considered. The corresponding problems can be solved via standard SOS solvers on modest hardware. The paper concludes with a numerical study which validates the theoretical findings." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The limitation of the paper regards the assumption that the cost function is rational or that the elements of the MTW tensor are rational. I believe it would be useful to provide general examples of when it holds/does not hold in the text to further clarify how strong/weak the assumption really is. \n\nI believe, however, that the implications of this work are not quite fully fleshed out." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could you please provide examples of real-world OT applications where the knowledge of the regularity of the Monge map is crucial ?\n- Could you please bring more details on the equivalence between non-negativity on polynomial terms and SOS representation ?\n- I think it would be of interest to provide the results on the regularity of the Monge transport map given the three types of non-negativity conditions given in the paper.\n- Have you considered experiments with higher dimension ? Is there any computational burden ?" 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Although I am no expert of SOS programming, the paper is well written so that it can be read by a large audience. In particular, the notation is easy to understand and Section 2 provides the most essential theoretical elements from OT and SOS programming domains to introduce the method.\n- Given the elements of Section 2, the idea of proving the MTW non negativity via SOS programming seems to be a very good (and natural) idea. This work seems to be the first to answer this question with relatively moderate theoretical and computational frameworks.\n- The formulation of the inverse problem is very interesting and once again well introduced and explained.\n- The diversity of numerical experiments (i.e. non trivial transport costs) definitely proves the theoretical statements." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors propose the first computational approach to certify the regularity of the Monge map for optimal transport problems with specific conditions on the transport cost and the state spaces. To be more precise, they evaluate the non-negativity of the fourth-order Ma-Trudinger-Wang (MTW) tensor associated to the transport cost, which has been proved to be a sufficient condition to establish the continuity of the Monge transport map under proper conditions on the marginals of the transport plan. In this work, they consider three versions of this non-negativity condition, previously considered by related works. Their method consists in reformulating the MTW condition (for each of the three versions) into a sum-of-squares program defined on a semialgebraic setvia Putinar's Positivstellansatz, which can then be solved with efficient software. 
In particular, their approach assumes that the transport cost (or at least the corresponding MTW tensor) is a rational function defined over a two-state semialgebraic space (or at least, a two-state space that contains a semialgebraic space). They apply their framework to verify if the transport cost verifies the MTW condition or to find the largest semialgebraic set on which the transport cost verifies the MTW condition. They propose several convincing numeric experiments in small dimension for a large variety of non-trivial transport costs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- As it seems crucial to apply SOS programming, the transformation of the non-negativity condition into a SOS representation in Equation (8) would deserve more explanation, in the appendix for example. For non expert readers, this relation is hard to understand.\n- The dimension of the numerical experiments is relatively low, while OT aims at solving large-scale problems.\n- Although the problem tackled in this paper is interesting from a theoretical perspective, I am quite concerned by the effective application of this work to OT problems." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. I encourage the authors to elaborate on potential ML applications or benefits of their work on non-Euclidean optimal transport. 
For example, would you like to discuss how your method could enhance practical ML systems that use OT, or to provide concrete examples of where non-Euclidean costs arise in ML problems? \n\n2. I encourage the authors to improve accessibility of technical parts (e.g., the description of forward problems and inverse problems), such as adding more intuitive explanations or examples alongside the formal mathematical notation." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This is a mathematically solid paper and resolves an interesting theoretical problem. It proposes a provably correct computational framework that can certify or falsify the non-negativity of the MTW tensor associated with a given ground cost under the assumptions that the ground cost is a rational and semialgebraic function. The proposed approach is based on sum-of-squares (SOS) programming and can be of independent interests. The authors also demonstrate that the proposed computational framework can be applied to non-rational ground cost function given that the elements of the MTW tensor are rational and can be used to solve the inverse problem." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In the context of OT, the fourth-order Ma-Trudinger-Wang (MTW) tensor associated with this ground cost function provides a notion\nof curvature. The non-negativity of this tensor plays a crucial role for establishing continuity for the Monge optimal transport map. In general, it is difficult to analytically verify this condition for any given ground cost. This paper proposes a provably correct computational approach which provides certificates of non-negativity for the MTW tensor using Sum-of-Squares (SOS) programming. 
The authors further show that their SOS technique can also be used to compute an inner approximation of the region where MTW non-negativity holds. They apply this proposed SOS programming method to several practical ground cost functions to approximate the regions of regularity of the corresponding OT maps. They also evaluate the proposed SOS computational framework for both the forward and the inverse problems." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main concern is that this paper seems irrelevant to the ICLR community. In practice, the common ground cost function would be Euclidean and I am not really sure if it is practically important to conduct the computational verification of OT regularity for a general class of non-Euclidean ground cost functions. I encourage the authors to elaborate on potential ML applications or benefits of their work on non-Euclidean optimal transport. For example, would you like to discuss how your method could enhance practical ML systems that use OT, or to provide concrete examples of where non-Euclidean costs arise in ML problems? \n\nAnother concern is the poor quality of writing. In particular, there are many advanced mathematical notations, such as semialgebraic functions and Archimedean sets, which are not accessible to the ICLR audience. Both Section 2 and Section 3 are written in a technical way without sufficient intuitive explanations or examples alongside the formal mathematical notation. In my humble opinion, the major contribution of the paper would be the SOS formulations for computing the MTW tensors, which is certainly nontrivial, but this paper would much better fit the applied mathematics oriented journal." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The manuscript doesn't contain (A) a discussion on the computational complexity for checking the NNCC condition and the two types of MTW conditions. Also, the discussion on (B) the complexity of the inverse problem is missing. For (A), the author is advised to provide an analysis. For (B), the author is advised to also provide a runtime in the work, i.e. the time it takes to plot the figures.\n\n2. What is the relationship between Monge OT map (which is Borel) and a Brenier map (which is point to point)? Is regularity of Brenier map, supposing it exists, also something that this formulation can answer?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper presents an under-studied aspect of regularity of Monte map. Moreover, the paper contains numerical algorithm and is practical.\n2. The inverse problem is well thought through. \n3. The numerics seems promising for a relatively difficult problem in OT." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a computational method for assessing the regularity structure of optimal transport plans. 
Specifically, the source and target distribution are continuous distributions on a manifold, the cost is some smooth function, and the regularity in question is the regularity of the pushforward map from source to target distributions. In addition, the computational tool computes the region where the MTW holds." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While it is not an issue for the author per se, it is unfortunate that the SOS condition only applies to semialgebraic sets for the manifold. \n\n2. The writing doesn't seem to include sufficient focus on the gap between the non-negativity condition and the SOS counterpart. It doesn't seem to be clear whether SOS is too strong for this case. \n\n3. The presentation is not clear for this paper. For example, the indexing convention for something like $c_{ij, p}$ is quite confusing. The only mention of $c_{ij, kl}$ in the earlier part is too far away, and the readers can not be expected to find where the notation is and also generalize from $c_{ij, kl}$ to $c_{ij, p}$. This is too confusing for this conference.\n\n4. Theorem 5 seems currently wrong: the function $F$ in (5) is matrix-valued. It doesn't seem correct to somehow assess if this matrix-valued rational function belongs to \sum_{SOS}[x, y]. *Unless this issue is either resolved or explained, this reviewer cannot increase the score above the acceptance threshold.*\n\n5. The author doesn't seem to use certain terms in differentiable geometry correctly. For x, y on different points of $\mathcal{M}$, one cannot directly apply an \"inner product\"/contraction between the tangent plane on $x$ and the cotangent plane of $y$. This issue is resolved, however, if $\mathcal{M}$ is a subset of $\R^{n'}$ and the differentiable structure comes from the Euclidean space. 
The author is advised to change the writing on this and make sure no further major mistake such as this is made.\n\nMinor comments:\n- The logic at line 293 is wrong: \\eta(\\xi) = 0 should belong to after \\forall." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a sum-of-squares programming method to numerically certify Ma-Trudinger-Wang regularity of optimal transport maps" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024sumofsquares,\ntitle={Sum-of-Squares Programming for Ma-Trudinger-Wang Regularity of Optimal Transport Maps},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0AHkdAtFW8},\nnote={under review}\n}" }, "abstract": { "value": "For a given ground cost, approximating the Monge optimal transport map that pushes forward a given probability measure onto another has become a staple in several modern machine learning algorithms. The fourth-order Ma-Trudinger-Wang (MTW) tensor associated with this ground cost function provides a notion of curvature in optimal transport. The non-negativity of this tensor plays a crucial role for establishing continuity for the Monge optimal transport map. It is, however, generally difficult to analytically verify this condition for any given ground cost. To expand the class of cost functions for which MTW non-negativity can be verified, we propose a provably correct computational approach which provides certificates of non-negativity for the MTW tensor using Sum-of-Squares (SOS) programming. We further show that our SOS technique can also be used to compute an inner approximation of the region where MTW non-negativity holds. We apply our proposed SOS programming method to several practical ground cost functions to approximate the regions of regularity of their corresponding optimal transport maps." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Optimal transport", "sum-of-squares programming", "Ma-Trudinger-Wang tensor" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/cab80dae9587724cfe6adbe63d723502452d5177.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/03d9034187d55d4efeb0b054e88695a7d9b9fd88.zip" }, "title": { "value": "Sum-of-Squares Programming for Ma-Trudinger-Wang Regularity of Optimal Transport Maps" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0ASCZrVzSX
Blessing of Dimensionality for Approximating Sobolev Classes on Manifolds
main
Active
approximation theory;manifold hypothesis;statistical complexity;Riemannian geometry
learning theory
5;5;6
3;3;3
4;2;3
2;2;2
3;2;3
5.333333
3
3
2
2.666667
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See above." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper has a natural motivation, and the concluded rate seems matching that of classical Euclidean case. The presentation is lucid, and the proof sketch and extended discussion is well written. Overall this paper is a solid contribution on the topic of manifold learning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the complexity of Sobolev function class on Riemannian manifolds. Specifically, the paper derives lower bound of the approximation error of a Sobolev ball by a smaller class with complexity bounded as pseudo-dimension. By constructing explicitly functions of bounded Sobolev norm that are separated in $L^1$, the paper connects the packing number of the manifold with a hard-to-learn subclass in the Sobolev ball, thus forcing a larger error/width. The main theorem claims a lower bound that only depends on intrinsic quantities of the manifold." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The result is not too surprising on the 1,p-Sobolev class, and considering that higher Sobolev space can even be an RKHS [1], one would expect major improvement on the rate. Also using volume comparison to control the packing number is rather standard, and one might further ask if the same technique is applicable to metric measure spaces or RCD spaces, though I understand this technicality may not be particularly befitting of this venue.\n\n\n2. Typos: \nDefinition 2.7 is defining packing number not covering number, and also metric entropy is not commonly defined this way. This version of metric entropy is exactly the same as packing number, hence (7) is not needed. (The key proposition C.1 is correct.) $CPf_a$ outside the balls on line 375 is not necessarily 0.\n\n[1] De Vito, Ernesto, Nicole Mücke, and Lorenzo Rosasco. \"Reproducing kernel Hilbert spaces on manifolds: Sobolev and diffusion spaces.\" Analysis and Applications 19.03 (2021): 363-396." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- The Theorem 1 lower bound (12) has a dependence on $p$, unlike in the Euclidean case (Maiorov and Ratsaby, 1999). Is there a plausible reason for this, i.e. is $p$ there for an inherent reason? \n\n- Why is Theorem 1 restricted to the case $K < 0$? 
What is different about the positive curvature case?\n\n- line 535: it is mentioned that the cutoff functions with bounds on higher derivatives is difficult to construct, but I am having trouble seeing why this should be so. Can the authors explain further?\n\n- line 758: The authors say \"By maximality, balls of radius $2\\epsilon$ at the $p_i$ cover $M$\" but why is this true? The manifold can potentially be very narrow.\n\n- line 691: What is the notation $\\mathcal{A}(z, 16 / \\sqrt{\\text{length}(z)})$?\n\n- Should the empirical risk in (56) be scaled by the sample size?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "For a technical paper, the presentation is approachable and is self-contained (modulo some typos and missing definitions). The paper extends the lower bound proved in (Maiorov and Ratsaby, 1999) to manifolds." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper concerns a lower bound for a nonlinear width (involving the pseudo-dimension, a generalized version of the VC dimension) of Sobolev classes over smooth manifolds of dimension $d$. The authors claim that while the manifold can be embedded in a higher dimensional space with dimesion $D \\gg d$ the width of the Sobolev class has a lower bound that depends only on the dimension $d$ of the manifold." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Not only is the result of Theorem 1 independent of ambient dimension $D$, the ambient dimension does not appear _anywhere_ in the estimates. This is somewhat odd because the abstract mentions the manifold hypothesis which concerns both $D$ and $d$. In some similar approximation bounds, there is typically some dependence on $D$. 
The authors should address this.\n\n- In a similar vein, the authors do not present a connection between the sample complexity mentioned in Proposition 2.3 and the main Theorem 1, as far as I can see. The assumed connection is that, due to this property of classes with finite pseudo-dimension $\\mathcal{H}_n$, the Sobolev class can also be estimated with the sample complexity given in (2), once the approximating class $\\mathcal{H}_n$ is determined, it can be estimated with this sample complexity. This connection should be made somewhere.\n\n- The main structure of the proof is almost identical to (Maiorov and Ratsaby, 1999), except the construction of the $L^1$-separated set of functions, due to the domain being a manifold. There are a few questions about the extended lower bound, which I ask below in the \"questions\" section." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I don't understand why the main result, Theorem 3.1, is measuring nonlinear n-width between the Sobolev space W^{1,p} and the Lebesgue space L^q. It's really unclear to me why this implies anything about the difficulty in approximating Sobolev space functions. I'd like to see this clarified." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The question of why deep neural networks work well on extremely high dimensional data is an important problem and the manifold hypothesis may be a good way to explain this. Work on this problem is important and valuable in machine learning. The problem of lower bounds on complexity is not studied as often as upper bounds. The results appear to be new and non-trivial." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper is focused on the manifold assumption in machine learning. The goal is to further shed light on how the intrinsic dimension of the data manifold enters into notions of complexity in function approximation. In particular, the authors prove lower bounds on the complexity of approximating Sobolev space functions on manifolds, and show that the lower bounds, which are essentially 1/n^(1/d), depend only on the intrinsic dimension d of the manifold, and not the ambient dimension of the space the manifold lies in. The authors use a notion of pseudodimension that is an extension of VC-dimension and measure complexity by the nonlinear n-width." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "It's not clear to me how applicable these results are in practice. Even when data satisfies the manifold assumption, the intrinsic dimension d may be quite large. It is not clear how large the authors think d is in practice, and how large a d would make these results applicable vs vacuous. For MNIST, for example, it's often given that d is between 7 and 14, depending on the digit. One can assume d is much larger for more challenging problems, maybe 20-40? 
In this case, the error bound 1/n^(1/d) is vacuous, unless the number of data points n is astronomically large (e.g., if d=20 we need 10^(20) data points!)." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "For Riemannian manifolds, we provide lower bounds on approximating bounded Sobolev balls with classes of finite statistical complexity. The derived rate at which the lower bound converges to zero depends only on intrinsic properties of the manifold." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024blessing,\ntitle={Blessing of Dimensionality for Approximating Sobolev Classes on Manifolds},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0ASCZrVzSX},\nnote={under review}\n}" }, "abstract": { "value": "The manifold hypothesis says that natural high-dimensional data lie on or around a low-dimensional manifold. The recent success of statistical and learning-based methods in very high dimensions empirically supports this hypothesis, suggesting that typical worst-case analysis does not provide practical guarantees. A natural step for analysis is thus to assume the manifold hypothesis and derive bounds that are independent of any ambient dimensions that the data may be embedded in. Theoretical implications in this direction have recently been explored in terms of generalization of ReLU networks and convergence of Langevin methods. In this work, we consider optimal uniform approximations with functions of finite statistical complexity. While upper bounds on uniform approximation exist in the literature in terms of ReLU network approximation, we consider the opposite: lower bounds to quantify the fundamental difficulty of approximation on manifolds. 
In particular, we demonstrate that the statistical complexity required to approximate a class of bounded Sobolev functions on a compact manifold is bounded from below, and moreover that this bound is dependent only on the intrinsic properties of the manifold, such as curvature, volume, and injectivity radius." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "approximation theory", "manifold hypothesis", "statistical complexity", "Riemannian geometry" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/63ca09a40742fe6d4c6f669525b90ff9f172509d.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Blessing of Dimensionality for Approximating Sobolev Classes on Manifolds" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0Ag8FQ5Rr3
The Super Weight in Large Language Models
main
Active
natural language processing
foundation or frontier models, including LLMs
1;5;5;5;5
5;4;4;3;3
1;2;3;3;3
1;2;2;2;2
1;3;3;3;3
4.2
3.8
2.4
1.8
2.6
-0.801784
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Could the authors provide clarification on the points I raised in the weaknesses section, especially if I may have misunderstood some of the contributions?\n\n2. In Figure 6, do the authors have any insights into the concave behavior of the scaling factor? Are there specific explanations or potential methods for identifying this optimal scaling factor?\n\n3. Regarding the stop word shift in distribution, is it generally accepted that a higher probability of stop words negatively impacts LLM performance?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well-written and effectively illustrates the importance of superweights and superactivations. I appreciate the discussion on the percolation of superactivations across the network and the identification of superweights across layers (Figure 3). Additionally, I find the potential implications of superweight upscaling presented in Figure 6 quite interesting." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper focuses on the impact of outlier weights in large language models (LLMs), specifically larger weights, which the authors term superweights and superactivations. 
First, the authors analyze how much these weights and activations affect LLM performance. They then use this as motivation to discuss quantization methods designed to account for superweights and superactivations. Throughout the paper, the authors also discuss the impact of superweight scaling and provide experimental results showing how their quantization method improves upon standard rounding, especially when using larger block sizes within the network." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While I appreciate the analysis presented in this paper, I am struggling to see the novelty of this work. I may be misunderstanding, but from what I gather, superweights and superactivations have already been discussed in prior analyses of LLMs. Additionally, it seems that methods like AWQ and SqueezeLLM inherently focus on superactivations. Furthermore, compared to other weight quantization techniques, the proposed method does not appear to offer significant improvements." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The paper mostly focuses on post training model weight/activation analysis and identifies certain handful of importance weights/activations. 
The authors also say that irrespective of the input prompt the super weights are always the same and they mostly occur in the early layer's down projection with some reasoning via skip connections diagram. \n\nThough these insights are helpful, but it would be good if authors can follow up with what happens during the training process that such super weights are formed in the first place. Does the training methodology in terms of quantization during training/layernorm, gradient scaling, etc play any role in the forming of these super weights?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Novel discovery about the importance of a few handful of neurons: The identification and analysis of super weights and super activations as critical outliers and their positive influence on model's performance is noteworthy and interesting. \n\nQuantization proposals: Authors went one step further to propose a super weight-aware quantization method to make the best use of these super weights/activations. Data free quantization proposal with on par performance compared to SmoothQuant is also a worthy contribution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper is about the discovery of super weights in LLMs that are disproportionately important, pruning these hurts model quality quite a bit. The authors have provided a way to identify these super weights using a forward pass. Super weights are activations are sensitive to quantization effects and hence authors propose a super weight aware quantization method enabling effective quantization." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Though the discovery is quite interesting, the improvements of proposed methods with existing baselines are quite marginal. In general, such kind of super weights might be a natural phenomenon in any machine learning model. How can one say this is relevant only to LLM's?\n\nThe work seems to be very much based on empirical observations (which is not my concern) but more discussions/intuitions/explanations around how/why these super weights are formed will be useful." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses section above." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The authors conducted experimental explorations on the so-called \"super weights.\"" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the sensitivity of a subset of outliers in LLMs, referring to them as \"super weights.\" The authors conducted experiments to examine the impact of these super weights on model performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The necessity of \"super weights\" is unclear, as outliers are already identified based on the threshold. Increasing the threshold will naturally reduce the number of outliers with very large weights. Given the known importance of outliers in LLMs, emphasizing \"super weights\" (outliers at a higher threshold) does not appear novel.\n\n2. Figure 1 is misleading. According to the author's definition, \"super weights\" are a subset of outliers. However, the figure suggests -1.9 is a typical outlier with nearby values being quite small (.1 and .2), implying that zeroing out outliers produces nonsensical text—a widely acknowledged fact. To better demonstrate the significance of super weights, it would be beneficial to explore whether zeroing out all outliers results in poor performance, and similarly, whether zeroing out just a small subset (e.g., 20-30) leads to comparably severe degradation.\n\n3. Table 1 raises critical concerns. First, the criterion for selecting outliers needs specification. Second, the \"Prune SW, +SA\" setting in Lines 146-152 is confusing, as it suggests pruning super weights while partially restoring super activations enhances quality. However, the authors did not prune activations, leading to confusion about this claim.\n\n4. Table 2 appears redundant and fails to convey meaningful information. Replacing it with visual representations of \"super weights\" distributions would be more informative, as the current table occupies considerable space without offering clear insights.\n\n5. Figure 2 is difficult to interpret. The depiction of super weights and their impact, such as generating nonsensical text, is not clear. The use of the same color block in both the network and the output is puzzling. Are the model's dynamics linear? How do the output and weights share the same significance? Clarification is needed on whether this figure is based on assumptions or empirical data.\n\n6. 
In Lines 189-190, the term \"super activations\" is introduced but lacks clarity on whether it is threshold-based or aligns with corresponding weights, which could be time-consuming. The authors should clarify this terminology.\n\n7. The paper contains several unprofessional notations. For example, \"Yij\" should be corrected to \"Y_{ij}\" in Line 204, and similarly, \"Xik\" and \"Wjk\" should be \"X_{ik}\" and \"W_{jk}\" in Line 205. The inconsistency in notation and dimensions between \"d\" and \"D\" in Line 204 suggests a lack of careful writing and review, raising concerns about the overall professionalism of the paper.\n\n8. Lines 198-210, which discuss the identification of super weights, are crucial yet unclear. The selection criteria for super weights remain ambiguous and need a precise mathematical description. Readers should understand the definition of outliers and the criteria for their selection explicitly.\n\n9. The paper lacks consistency in terminology. \"Super weights\" sometimes refer to both activations and weights, and at other times only to weights, adding confusion. In Line 306, the term \"super outliers\" is introduced, suggesting that the paper should maintain consistent terminology from the start, including in the title, if both weights and activations are discussed.\n\nAfter several careful readings, there are numerous additional concerns throughout the paper. The issues are substantial and critical, making it unlikely to meet the standards of ICLR. I recommend a strong reject based on the quality of this paper and will not change my rate." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. For equation 1, the median is used to replace super activation. Is getting the median\ntime-consuming since GPU is not good at sorting? (Although there are GPU-version\nsorting algorithms)\n2. The authors mentioned that SmoothQuant does not report on some models this paper\nevaluates, they compare our results with naive W8A8 quantization (line 407 - line 409).\nCan the authors run SmoothQuant on these methods since it is open-source? The naive\nW8A8 is a too-weak baseline." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The discovery is interesting and the proposed quantization method is easy to implement, which\ncan maintain better performance compared to Round to nearest quantization with the same\nblock size." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper reveals that Large Language Models (LLMs) contain a very small subset of weights\n(super weights) that are extremely important, where removing them severely degrades model\nperformance. The researchers developed an efficient, data-free method to identify these super\nweights using only a single forward pass. 
They further investigated how these super weights\ninfluence network behavior by analyzing their relationship with activation outliers. Building on\nthese insights, they proposed a quantization approach that carefully preserves these super\nweights while effectively compressing other weights, resulting in the maintenance of model\nquality after compression." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors failed to show how the proposed methods can improve the SOTA.\n1. Although the method is data-free, its performance does not exceed SOTA methods like\nSmoothQuant, given incorporating a small calibration dataset would not increase the\nquantization complexity much.\n2. The author mentions that this method is hardware-friendly, but no experiments to show\nits effectiveness in improving latency, throughput, memory usage, etc." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Section 3.2, \"Prune SW+SA\": The description of the \"Prune SW+SA\" condition in Section 3.2 is unclear. Specifically, how does this condition differ from the original model? I understand that super activations typically precede super weights in the model. Therefore, I am unsure what modification is being made in \"Prune SW+SA\" and how it distinguishes itself from the original, unpruned model. 
Could you please elaborate on this procedure?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The identification of \"super weights\" and their connection to super activations represents a novel and potentially significant finding in understanding the inner workings of LLMs.\n- Connection of \"super weights\" to quantization accuracy is quite interesting and has practical implications.\n- The paper provides a clear methodology for identifying super weights and evaluating their impact, along with an index of super weight coordinates for common LLMs, facilitating further research." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces the concept of \"super weights\" in Large Language Models (LLMs), identifying a small number of individual weight parameters (as few as one) that have a disproportionately large impact on model performance. Pruning these super weights drastically reduces the quality of generated text, while pruning thousands of other larger-magnitude outliers has a negligible effect. The paper proposes a data-free method for identifying super weights based on their connection to \"super activations,\" exceptionally large activation outliers previously observed in LLMs. Finally, the paper demonstrates that preserving super weights and activations during quantization significantly improves compression quality, achieving competitive results methods like SmoothQuant." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "# Major\n\n- Connection to Adversarial Examples: The literature extensively documents how small changes in the input domain can drastically alter output probabilities. 
Consequently, significantly harming the network by removing weights, as demonstrated, is somewhat expected. A discussion addressing the connection between super weight removal and adversarial examples would strengthen the paper.\n\n- Magnitude Pruning Baseline: In Table 1, the comparison of super weight pruning with global magnitude pruning may not be the most informative. A stronger baseline would involve pruning only within the layer where super activations occur. This would better isolate the impact of the super weight itself.\n\n- Quantization Baseline: The \"Naive W8A8\" quantization baseline should incorporate clipping. The current presentation makes it unclear whether the observed improvements stem from outlier removal or clipping, especially since super weight handling affects only a single layer during quantization, while clipping is applied to every layer. Furthermore, it should be noted that the clipping threshold is determined using Wikitext-2, which is also included in the evaluation of quantized models.\n\n# Minor\n\n- Terminology: The term \"extreme\" might be more descriptive and informative than \"super\" when referring to these weights.\n\n- Weight Distribution Visualization: Including a histogram visualizing the position of the super weight within the overall weight distribution would enhance understanding of its magnitude relative to other weights." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We discover and study \"super weights\" in LLM, which are very few in numbers yet crucial to model quality." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024the,\ntitle={The Super Weight in Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0Ag8FQ5Rr3},\nnote={under review}\n}" }, "abstract": { "value": "Recent works have shown a surprising result: a small fraction of Large Language Model (LLM) parameter outliers are disproportionately important to the quality of the model. LLMs contain billions of parameters, so these small fractions, such as 0.01%, translate to hundreds of thousands of parameters. In this work, we present an even more surprising finding: pruning as few as a single parameter can destroy an LLM’s ability to generate text—resulting in an increase in perplexity by three orders of magnitude and reducing zero-shot accuracy to guessing. We propose a data-free method for identifying such parameters, termed super weights, using a single forward pass through the model. Additionally, we find that these super weights induce correspondingly rare and large activation outliers, termed super activations. When preserved with high precision, super activations can enhance simple round-to-nearest quantization, making it competitive with state-of-the-art methods. For weight quantization, we similarly find that by preserving the super weight and clipping other weight outliers, round-to-nearest quantization can scale to much larger block sizes than previously considered. To facilitate further research into super weights, we provide an index of super weight coordinates for common, openly available LLMs." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "natural language processing" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/42983b9a0531517e43e254e991be6f6a3f53eaf9.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "The Super Weight in Large Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0ApkwFlCxq
ComputAgeBench: Epigenetic Aging Clocks Benchmark
main
Active
biological age;epigenetic aging clocks;DNA methylation;aging biomarkers;longevity
datasets and benchmarks
5;5;6;6
5;4;4;3
3;2;3;3
2;2;2;3
3;3;4;3
5.5
4
2.75
2.25
3.25
-0.707107
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see my questions in the above weakness section." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The author critiques previous benchmarks for being either small in scale, limited to predicting chronological age, lacking standardized datasets, comparing only a limited number of models, or relying on mortality and disease data that have restricted access. \n\nThe proposed benchmark seems address all of these limitations. Derived from publicly accessible data, it includes processing of data from both age accelerating condition (ACC) and healthy control (HC) groups to test model’s ability to distinguish between these conditions. Diseases with ACC are well considered. The benchmark includes 4 well-defined tasks with a summary score and evaluates 13 previously published models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The author introduces a benchmark designed to evaluate models of the epigenetic aging clock. The benchmark includes 66 datasets containing DNA methylation data that meet specific conditions and corresponding metadata, with a total sample size of 10,410. 
Four tasks are proposed to assess the models’ ability to distinguish between healthy individuals(HC) and age-accelerating conditions(ACC). Results of these four tests are summarized into Cumulative Benchmarking Score. The benchmark framework also includes 13 previously published models results." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper is well-written and comprehensive overall, but several technical points need further clarification:\n\n1. The selection of metrics for benchmark tasks requires more justification. Specifically, why do tasks 2, 3, and 4 report median instead of the mean? Additionally, task 4 mentions the \"presence of covariate shift,\" but this shift is not clearly explained. Could the authors specify the covariate shift further ?\n\n2. The rationale behind the summary benchmark score requires further explanation. Why was this scoring method chosen, and what are its advantages? Also, what does \"positive bias\" refer to in this context? In the Results section, it is stated that $S_{AA1}$ is adjusted by a ratio to penalize prediction bias, yet this concept of prediction bias remains unexplained. Further clarification on what prediction bias entails here would be beneficial.\n\n3. It appears that plots C and D in Figure 3 may be incorrectly presented. Plot D should likely represent $Med(|\\Delta|)$ rather than $Med(\\Delta)$, as all points are above the diagonal. Please clarify if this is a mislabeling or if I have misunderstood the data shown." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- Will you make your benchmarking dataset publicly available? Can you please add a link to it in your manuscript? I view this benchmarking dataset as a significant portion of your contribution in this work.\n- Can you please confirm that your evaluation tasks/metrics are original, and add citations if not?\n- Can you make a case for why the paper is a strong fit for ICLR, despite not truly being in the representation learning space?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This paper is written very clearly, and did a great job walking the reader through the background to the problem, definitions of biological age, and different kinds of biological clock models. It’s graphics are informative, clear, and aesthetic. Truly a pleasure to read! \n- Provides colab notebook for reproducibility\n- I believe this paper will be significant to those in the biological clocks community. It is a benchmarking paper, so while it doesn't offer a new methodology itself, it does offer original tasks/metrics for assessing the performance of these models (I think they are original, I asked for clarification in the questions section) and a standardized benchmarking dataset (I asked for clarity to confirm it will in fact be published along with this paper)" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper benchmarks 13 different published biological clock models using a standardized test dataset that they compiled from more than 50 different publicly available studies. 
While no ground truth data is available for biological age (as it is a latent factor) or for age at death (as this data often isn’t published), the authors offer 4 compelling metrics by which to score the models accuracy and robustness. This paper presents a resource to the community in terms of a newly published benchmarking dataset, well-motivated metrics, and ratings for the current state of the art clock models. The paper also appropriately outlines limitations, such as the fact that some datasets had poor performance across all models, raising questions about dataset shift and for what kinds of data the clocks can be expected to make sound predictions. I believe this paper will help generate scientific discussion and progress in the aging clocks research community." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I was disappointed that the clock models weren't all re-trained on a standardized training dataset. Without standardizing the training data, it is impossible to know whether the methodology of the clock or the training data it used are contributing to better/worse performance. This insight would be critical to the community in improving clock methodologies going forward.\n- The way that the authors chose to combine benchmarks in the cumulative score requires more justification. I am not sure why the different metrics should affect each other's weights so much. A simple sum, or weighted sum, of the four variables might be more appropriate if stronger justification is not supplied.\n- Requires clarification: on the one hand, authors write \"Clearly, the first task [AA1] provides a more rigorous way to test aging clocks [compared to AA2]\" on the other hand, they write \"The most rigorous of the four, AA2 task demonstrates...\"\n- Your description of the biomarker paradox could be improved. When I first read your description, I was left with questions. 
I had trouble finding more info on the \"paradox of biomarkers\" using the papers you cited (possibly due to paywall issues, I couldn't see the full articles), but you might consider adding this reference _Sluiskes, Marije H., et al. \"Clarifying the biological and statistical assumptions of cross-sectional biological age predictors: an elaborate illustration using synthetic and real data.\" BMC Medical Research Methodology 24.1 (2024): 58._ as their explanation made me fully understand the problem, namely that \"a (bio)marker that perfectly correlates with chronological age is useless in estimating biological age... in principle a nearly perfect chronological age predictor can be developed, as long as the sample size is large enough [35]. In such a case all signal related to biological aging would be lost.\"\n\nMore broadly, while I really enjoyed the paper, I am not sure it is a great fit for the ICLR community, as this model is a predictive regression model and not in the space of representation learning." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I would like to ask the authors to address the two main criticisms I listed in the \"weaknesses\" section:\n- Overall, the opinion of this reviewer is that while the work has undoubtedly merit, it would be better suited for a forum more specific to biological age and aging clocks. 
\n- Regarding the normalization of methylation data, I would invite the authors to at least discuss whether the preprocessing of the included datasets match the recommended preprocessing of each aging clock (if any)." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The benchmark is well structured: (i) a variety of datasets and methods are included, and (ii) the tasks upon which the methods are evaluated are well defined and relevant for the domain. Furthermore, such type of benchmarks are quite timely, due to a continuously growing list of available aging clocks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present a benchmark study where they contrast different computational methods, namely aging clocks, for inferring biological age from epigenetics (methylation) data. A corpus of datasets relevant for the benchmark was built through a systematic search, and it is provided as a resource. Finally, the evaluation was performed on four different tasks, devised in such a way to capture different aspects of aging clocks' performances." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main criticism is that the paper is only marginally relevant with respect to the topics of the conference. Inferring the biological age of an individual can hardly be considered as learning representations. The machine learning methods used for deriving aging clocks are very well known and established, thus lacking novelty. The tasks presented in the paper to assess the clocks' performances are not totally novel, as the authors themselves point out in section 2.2.\n\nFrom a technical point of view, an important aspect that the paper does not address is preprocessing. 
Several normalization methods exist for methylation data, and their impact to downstream analysis is well documented (see for example Teschendorff et al. 2013). A robust benchmark should try to evaluate the effect of different normalization methods on aging clock performances. \n\nA minor issue the authors may want to consider: the long list of reference at page 6 could be placed in the appendix, to ease reading\n\nAndrew E. Teschendorff, Francesco Marabita, Matthias Lechner, Thomas Bartlett, Jesper Tegner, David Gomez-Cabrero, Stephan Beck, A beta-mixture quantile normalization method for correcting probe design bias in Illumina Infinium 450 k DNA methylation data, Bioinformatics, Volume 29, Issue 2, January 2013, Pages 189–196," }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "Yes, Responsible research practice (e.g., human subjects, data release)" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "### Questions for the Authors\n\nIn evaluating the dataset and methodology presented, several questions arose that could help clarify the dataset’s potential applications and limitations.\n\n1. **Applicability for Method Development**: Can this dataset be effectively used for developing new methods on epigenetic aging clocks, or is it primarily intended for benchmarking and evaluation? Are there features or structures in the dataset that support novel method exploration?\n\n2. 
**Data Diversity and Representativeness**: How does the dataset account for demographic and biological diversity? Could the authors provide more details on the inclusion criteria to ensure the dataset is representative of a broad population?\n\n3. **Addressing Balance and Bias**: Were any steps taken to balance the dataset across aging-accelerating conditions (AACs) and healthy controls, or to mitigate known biases in the sample selection process?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "### Strengths \n\n The paper is clear and well-written, providing a solid foundation for its contributions. It presents a unified framework for evaluating epigenetic aging clocks, covering both first- and second-generation clocks. By introducing a benchmark dataset, the authors enable comprehensive testing of multiple epigenetic clock methods. \n\nThis work has potential to significantly impact the field of biological aging, as it offers a standardized dataset that can facilitate consistent evaluation across various epigenetic clock methods. Such a resource will likely streamline method comparison and improve reliability in aging research." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes ComputAgeBench, a unified framework and benchmark dataset for evaluating epigenetic aging clocks, which are predictive models for estimating biological age based on DNA methylation data. The framework aggregates 66 public datasets covering 19 aging-accelerating conditions, along with 13 published epigenetic clock models, to assess model performance consistently across a standardized dataset. The methodology incorporates rigorous evaluation criteria to test each model’s ability to distinguish between healthy individuals and those with accelerated aging conditions." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "In reviewing the proposed benchmark in this paper, several key areas for improvement have emerged, particularly concerning data diversity, balance, and bias. \n\n \n\n### Weaknesses \n\n \n\n1. **Limited Report on Data Diversity**: The paper lacks adequate details on demographic and biological diversity, such as age, ethnicity, and health variations. Including these would improve the dataset's representativeness for broader applications. \n\n \n\n2. **Data Balance and Bias**: The authors do not address balance across categories (e.g., AACs vs. healthy controls) or potential sampling biases. This oversight may skew benchmarking results and limit generalizability. \n\n \n\n3. **Absence of Bias Mitigation**: No strategies are mentioned to detect or reduce dataset biases, which is crucial for fair benchmarking in aging prediction models, where demographic factors can affect DNA methylation patterns and model performance. Additional evaluation metrics for fairness would increase the strength of this benchmark. \n\n \n\n4. **Put Together Publicly Available Dataset**: The proposed dataset, to my understanding, is a collection of existing publicly available datasets. The authors do not present to the research community a new benchmarking dataset, they rather collect existing datasets that they put together with a published harmonization technique. \n\nThe fact that the datasets already exist publicly, reduces the novelty of the benchmark. However, I cannot ignore that putting together 66 datasets into a single dataset is a contribution that would facitilitate the comparison of epigenetic clock methods." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "ComputAgeBench is the first framework for benchmarking aging clocks, which comprises 66 open-access datasets and compares 13 published models to find reliable biomarkers of health and aging." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024computagebench,\ntitle={ComputAgeBench: Epigenetic Aging Clocks Benchmark},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0ApkwFlCxq},\nnote={under review}\n}" }, "abstract": { "value": "The success of clinical trials of longevity drugs relies heavily on identifying integrative health and aging biomarkers, such as biological age. Epigenetic aging clocks predict the biological age of an individual using their DNA methylation profiles, commonly retrieved from blood samples. However, there is no standardized methodology to validate and compare epigenetic clock models as yet. We propose ComputAgeBench, a unifying framework that comprises such a methodology and a dataset for comprehensive benchmarking of different clinically relevant aging clocks. Our methodology exploits the core idea that reliable aging clocks must be able to distinguish between healthy individuals and those with aging-accelerating conditions. Specifically, we collected and harmonized 66 public datasets of blood DNA methylation, covering 19 such conditions across different ages and tested 13 published clock models. We believe our work will bring the fields of aging biology and machine learning closer together for the research on reliable biomarkers of health and aging." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "biological age", "epigenetic aging clocks", "DNA methylation", "aging biomarkers", "longevity" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/9e19fd966cb870bf815b97603aafb24b14afab8b.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/7bee137257178917182aa27edd16b502970dafe1.zip" }, "title": { "value": "ComputAgeBench: Epigenetic Aging Clocks Benchmark" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0BBzwpLVpm
Learning Identifiable Concepts for Compositional Image Generation
main
Active
concept; composition; image generation
generative models
3;3;5;6
4;3;3;4
2;2;3;3
2;2;2;3
1;2;2;3
4.25
3.5
2.5
2.25
2
0.19245
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. When the attributes of a dataset are not directly accessible, how can they be retrieved? \n\n2. The current method utilizes a GAN-based model as the foundation. Is it feasible to implement this approach using a diffusion model instead? \n\n3. Additionally, how many attributes can this method manage effectively? If we aim to train a general-purpose model that can handle more than a thousand attributes, what strategies should be employed to address this scenario?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Compositional image generation is a critical and practical problem, and this paper proposes a method to address it. \n\n- The paper presents an identifiable guarantee for learning the underlying concepts. \n\n- The generated images are promising, demonstrating the potential of the proposed method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses an intriguing problem: compositional image generation. It introduces the minimal change principle and proposes a method to limit the information introduced by each label. A causal conditioning approach is employed to disentangle concepts from correlations. 
The effectiveness of this method is validated across several tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This method relies on pre-defined attributes, which limits the method's practical applicability. \n\n2. Additionally, the proposed methods are evaluated only on simple datasets, which may not adequately represent complex real-world scenarios." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Regarding Section 3.4, it’s unclear why inversion cannot be done in the $z$ space or $w$ space. Would it be possible to move the input of $f\\_i$ to $z$ or $w$ space and perform inversion in $z$ instead?\n* It is unclear why the first row of Table 2 is labeled as \"Ours.\" It appears to correspond to StyleGAN2-ADA." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The minimal change principle is intuitive and makes sense.\n* The concept of causal conditioning is interesting and intuitive.\n* The proposed method achieves superior FID scores on MNIST4 and Car9 datasets compared to StyleADA and AugGAN." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents the minimal change principle and causal conditioning to allow generative models to create compositional images with clear, identifiable concepts. The central idea is to control image attributes without inducing unintended changes. To accomplish this, the authors regularize the model to learn the minimum dimensions needed to edit an attribute and use causal discovery algorithms to disentangle dependent attributes. The authors empirically and theoretically demonstrate that this approach enables models to learn attributes that are both identifiable and composable." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* There is no quantitative comparison showing if the model controls attributes better than the baselines. For example, metrics like Editing \n FID from StyleRes could be used to demonstrate controllability.\n* Baselines about image editing and compositional image generation are missing.\n * CausalGAN (Kocaoglu, et al. \"Causalgan: Learning causal implicit generative models with adversarial training.\" 2017.)\n * AugGAN(on FFHQ) (Hou, et al. \"Augmentation-aware self-supervision for data-efficient GAN training.\" 2024.)\n * StyleRes (Pehlivan, et al. \"Styleres: Transforming the residuals for real image editing with stylegan.\" 2023.)\n * HyperStyle (Alaluf, et al. \"Hyperstyle: Stylegan inversion with hypernetworks for real image editing.\" 2022.)\n * StyleTransformer. (Hu, et al. \"Style transformer for image inversion and editing.\" 2022.)\n* Except for Figure 8, there is no metric provided for editability or composability, making it difficult to assess whether the proposed method learns more identifiable concepts than the baselines. Additionally, in the ablation studies, it is challenging to gauge the effectiveness of the proposed components without metrics for editability or composability." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- What is the role of $\\mathbf{z}_c^*$ in equation (1)? It seems like it should encode information not represented by annotated labels (e.g., nuanced details). However, isn’t this type of information typically handled by the random noise $\\epsilon$? Does including $\\mathbf{z}_c^*$ have a significant impact on performance?\n- What is the role of $\\mathbf{z}^{\\text{null}}_i$ in equation (6)? What kind of information is it intended to encode?\n- It is hard to fully understand why enforcing the sparsity loss in equation (7) induces the minimal change principle. While Lines 522–524 suggest that constraining the representation’s dimensionality limits redundant information, this rationale is not entirely convincing. The minimal change principle, as described by the authors, states that \"the influence brought by each ground-truth concept should be minimal,\" which implies that changes in representation space should translate to minimal changes in the output space (e.g., altering the ‘age’ should yield the same image but with a different age). 
However, the sparsity loss in Equation (7) seems to restrict the input representation space rather than the changes in the output space, making it unclear how this connects to the minimal change principle.\n- It would be better to use distinct notations for $\\mathbf{z}_i$ in equation (3) and (6) as they are clearly denoting different variables. \n- Does $\\mathbf{m}_i$ in L177 refer to $\\mathbf{A}_i$? \n- In Figure 6, the authors claim that foundation models (e.g., GPT-4o) generate unrealistic images for unseen attribute combinations. However, all images generated by GPT-4o in Figure 6 appear unnatural, suggesting that the poor results might not be due to rare attribute combinations but other factors, such as improper prompts provided to the model. Could the authors clarify if proper prompt was used, and whether different prompts might correct GPT-4’s performance on unseen combinations?\n- In Table 8, which evaluates generation performance on human faces, it would be more comprehensive to include metrics for other generative models (e.g., GPT-4o, Meta AI, Stable Diffusion 3, as in Figure 6) for comparison.\n- Between the sparsity condition and causal conditioning, which component is the key factor that causes the proposed method to succeed where the baselines fail in Figure 5? Would simply applying causal conditioning to the baselines improve their performance?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The idea of transforming labels to identify and disentangle causal relationships among attributes is interesting, and the authors have effectively demonstrated its impact in the experimental results.\n- The proposed method significantly outperforms the baselines, both qualitatively and quantitatively, validating its practical advantages in achieving high-quality, controllable image generation, even in low-data settings." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a GAN-based framework for learning identifiable concepts. Given ground-truth attribute labels, random noise is transformed into latent representations aligned with these labels, and sparsified using learnable masks to enforce a minimal change principle. To mitigate existing correlations between certain attributes, the authors explicitly identify causal relationships among attributes and factorize the labels to remove dependencies. Empirical results demonstrate that the proposed method outperforms baselines in terms of data efficiency and controllability.\nThe main contributions of the paper are as follows:\n- Formulation of the minimal change principle to learn compositional concepts, along with an efficient approach to factorize causally related attributes.\n- Theoretical proof that the proposed method can recover ground-truth concepts.\n- Empirical evidence showcasing improved data efficiency and controllability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- It is unclear how the proposed method learns compositional concepts more effectively or in a fundamentally different way compared to existing approaches. 
Since the baselines also leverage disentangled ground-truth attribute labels, wouldn’t they similarly be capable of learning a generative model for compositional generation? In a similar context, it’s not fully explained why the proposed method is more data-efficient than the baselines. A more detailed elaboration on these points would strengthen the paper.\n- The paper introduces several components (e.g., sparsity loss, learnable masks, $\\mathbf{z}^{\\text{null}}_i$​), but the justification for each component and their connections seems weak. It is a bit confusing as a reader to understand why each part is necessary. Please refer to the questions below for specific points on this aspect." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "n/a" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "It seems a relatively big limitation that the method relies on such rigid one-hot labels when the modern paradigm of image editing involves free-form textual descriptions. Do the authors envision easy ways to extend this to continuous or multi-label attributes, or free-form text? A discussion of the proposed binary attribute paradigm relates to the common free-form text editing one -- and their relative strengths -- would be insightful here." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The studied problem of generating unseen attribute combinations is a pertinent one, with important implications for under-represented demographics and subpopulations. The authors did a convincing job with Figure 1 and in the introduction of motivating the problem with current large-scale image synthesis models, and making the benefits of the proposed solution salient.\n- I appreciate that the experiments are relatively thorough in exploring multiple forms of image synthesis. Not only do the authors consider unconditional synthesis, but they also show how one can edit real images, greatly improving the contribution of their method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors study compositional attribute generation and editing in image synthesis models. They argue relatively convincingly that the current large-scale image generation models fail to generate uncommon attribute labels (e.g. “female” + “facial hair”), and propose a methodology to address this through the use of masks learned with a causal structure. The results show the method produces images that do not exhibit mode collapse like the baselines. In the case of editing real images, there is significant improvements to the editing of rare attribute combinations over recent work." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "## [W1] Trade-off in performance for common attributes\n\nIn Figure 7, whilst the method clearly excels at generating rare attribute combinations (e.g. female + goatee), it fails in other cases to make more common edits (e.g. +blonde hair, or +bald).\n\nTo me, this seems very problematic. 
Almost by definition, most users will be interested in generating common attribute combinations. The fact that the method works so well on unseen combinations is a testament to its potential value, to be clear, but trading-off functionality for common edits at the same time seems like a clear and fundamental limitation. What use case does the proposed method serve if it’s at the cost of the common attribute combinations? In my view, this is the primary issue with the paper.\n\nAt minimum, I would expect to see a detailed discussion of this trade-off, and a solid justification for why it is worth making. Do the authors have any insights into why this might be happening? Furthermore, an insightful study would be one that quantifies the \"accuracy\" of edits for common vs uncommon attributes -- one could train a CelebA binary classifier to classify if an edited image actually depicts the new attribute or not, and one could see a breakdown of the performance for common vs rare attributes.\n\n## [W2] Lack of convincing baselines for independent attribute datasets\n\nI am not convinced that the authors do a good job of showcasing the benefits of their method in the independent attribute setting (Table 1 and Figure 5). Concretely, it is worrying that the baseline methods mostly fail to generate anything coherent at all (~20x as large FID scores). This really does not tell us much other than the baselines failed to train well (which could be for any number of reasons).\n\nThe authors could do a better job training the baseline models for a fairer comparison (e.g. perhaps with significant data augmentation, or through differentiable techniques such as [1]). Ultimately, we are not interested in the image quality itself, but instead in how well they perform in the “Out-FID” row on the rare attribute combinations. 
Through better training of the base models, we can isolate the impact of the proposed method on this row of interest without the confounding variable of the raw image synthesis quality in the way.\n\n## minor\n\nThe paper is full of typos, and some poorly written sentences. Just to mention a handful of examples from the introduction alone on the second page:\n\n- [L64] leads to → lead to\n- [L66] Ssadow → Shadow\n- [L72] mkae→ make\n\nUltimately these typos are indicative of a lack of care for presentation, and at times this renders the sentences hard to parse which I found often detracting from the content of the paper. I suggest some careful proof-reading is needed before the camera-ready or resubmission.\n\n---\n\n[1] Zhao et al. “Differentiable Augmentation for Data-Efficient GAN Training.” NeurIPS 2020." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We present a method for compositional image generation and editing with identifiability guarantees." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024learning,\ntitle={Learning Identifiable Concepts for Compositional Image Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0BBzwpLVpm},\nnote={under review}\n}" }, "abstract": { "value": "Humans have the ability to decompose objects into parts and relationships and\ncreate new objects by properly combining existing concepts. However, enabling\nmachines to achieve this in real-world tasks remains a challenge. In this paper,\nwe investigate how to teach machines compositional image generation through\nlearning identifiable concepts. To derive concepts from attribute labels, we formulate the minimal change principle and propose a method to limit the information introduced by each label. 
Additionally, to address dependent attribute labels\n(with causal influences in between or common causes behind them), we present\na causal conditioning approach to disentangle concepts from these correlations.\nOur framework enhances data efficiency, interpretability, and control, while enabling sampling from unseen combinations. We validate our method on various\ncompositional image generation and editing tasks, demonstrating its effectiveness\nthrough superior performance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "concept; composition; image generation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1746275f4e677f128e95eb27c5e373f2d90afb6b.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/6a61193071ae2b9548fa31ed5f73a2bbacfea3e4.zip" }, "title": { "value": "Learning Identifiable Concepts for Compositional Image Generation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0BujOfTqab
AdvWave: Stealthy Adversarial Jailbreak Attack against Large Audio-Language Models
main
Active
jailbreak;adversarial attack;audio-language model
alignment, fairness, safety, privacy, and societal considerations
3;3;6;8
5;4;5;4
3;1;3;3
2;3;4;3
3;2;3;3
5
4.5
2.5
3
2.75
-0.235702
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "The paper proposes a jailbreak attack against Large Audio Language Models that can enable users to extract harmful information from these models cause them to respond to other users in a harmful manner." }, "flag_for_ethics_review": { "value": [ "Yes, Privacy, security and safety", "Yes, Potentially harmful insights, methodologies and applications" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Why is noise-to-signal ratio used instead of the more common signal-to-noise ratio? Is it computed in a similar manner as SNR? The normalization and subtraction yields a quantity that is proportional to SNR so perhaps its simpler to just use SNR.\n1. How exactly is $S_{\\text{Mel-Sim}}$ computed? The mel spectrogram is a matrix so how exactly is the cosine similarity computed? \n 1. Why is cosine similarity used instead of L2 distance that is commonly used to compare mel spectrograms? I am not sure if the cosine similarity has a reasonable interpretation for mel spectrograms." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well written and generally clear\n1. The proposed approach is novel and fills an important gap in the current literature.\n1. The proposed attack is successful on diverse models which indicates its generalizability\n1. 
Using an audio classification loss to make the adversarial suffix resemble natural sounds is an interesting and novel approach" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a gradient-based jailbreak attack against Large Audio Language Models (LALM). The proposed method optimizes an adversarial audio suffix that bypasses the safety alignment of the LALM and causes it to produce harmful outputs. To account for the discretization performed to convert continuous audio representations into discrete tokens, a \"dual-phase\" optimization method is proposed whereby, first, the discrete token sequence is optimized to produce the desired harmful output and then the audio suffix is optimized to yield the discrete audio token sequence. Additionally, an adaptive search procedure is proposed to determine the best target for the adversarial loss optimization, and a loss component is introduced to make the adversarial suffix resemble a given environmental sound. Results show that compared to baselines the proposed approach greatly improves attack success rates." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The -Trans baselines seem too weak because these attacks tend to introduce symbols, like special characters, punctuations and emojis, that are not vocalizable so it is expected that generating speech from them will produce weak results. I recommend presenting results for the text-only attack along with the -Trans attack. This way the actual advantage of exploiting the auditory modality will become apparent.\n 1. A better baseline could be to adversarially attack an ASR model that uses the same audio encoder as the LALM such that the target transcription is the text-only attack string.\n\n1. 
More details about the human evaluation score ($S_{\\text{Human}}$) are needed, including the number of raters, inter-rater agreement, and whether all raters rated all the test audios.\n1. The normalization used for the stealth scores seems to weight the components unfairly. The NSR and cosine are normalized by their theoretic maximum, while the human score is unnormalized so if the actual NSR and cosine scores occupy a smaller range then their contribution to the score will be penalized. A better normalization scheme might be to normalize the mean to 0.5 and standard deviation to 0.125.\n1. The presentation can be improved:\n 1. Phase II is to the left of Phase I in Figure 1. I suggest reorganizing it to make it appear to the right.\n 1. The phrase \"gradient shattering\" or \"shattered gradients\" is confusing here because in prior work it refers to the specific phenomenon that as neural networks become deeper their gradients resemble white noise [1]. The particular phenomenon of relevance in this study is generally referred to as \"gradient obfuscation\" or \"obfuscated gradients\".\n 1. The phrase \"retention loss\" is confusing because it is not clear what is being retained. The target discrete token sequence can not be \"retained\" because the encoder currently does not output it and it is being optimized to do so. Perhaps, \"alignment loss\" or \"sequence loss\" might be better.\n 1. It is not clear from equation 2 that only a suffix is being optimized. It appears that the entire audio is being optimized.\n\n\n[1] Balduzzi, David, et al. \"The shattered gradients problem: If resnets are the answer, then what is the question?.\" International conference on machine learning. PMLR, 2017." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Did you try other audio-transcribed jailbreak classes, including more naturalistic text like in Zheng et al's persuasion paper? [1]\n2. What made you think GCG and BEAST were strong baselines when translated into audio? \n3. Did you attempt your jailbreaks on any versions of Gemini or 4o? To my understanding some of the more capable models are only trained to recognise speech data - which would presumably make your noise perturbations less effective?\n4. Who were the humans judging your stealthiness? was there a more detailed rubric you can share?\n\n\n\n\n[1] https://arxiv.org/abs/2401.06373" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This paper tackles a important and as yet unaddressed issue in jailbreak literature, and does so with sensitivity to realism. I am particularly impressed with the authors' operationalisation of stealthiness as urban noise (pending audio samples that I can listen to when the paper is out). 
The authors' use of human judgment to verify and counterweight their classifier (itself a potentially valuable contribution to audio-jailbreak defense) strengthens my confidence in these results even if I can't aurally verify them myself. \n\nThe results of their optimisation are strong. Their ASR results are comparable to or exceed the other audio-jailbreak papers I know of that were submitted to ICLR.\n\nThe methods developed to optimise jailbreaks against audio-models, given the challenges the authors list, are valuable and novel contributions themselves. In particular the method for adversarial optimisation target search seems to me to strengthen the jailbreak method over the baselines they test against. For example, GCG is reasonably well-known for optimising for outputs such as \"Sure,\" even if immediately followed with \"I can't help with that.\" The adaptivity and greater detail of the jailbreak targets listed in the appendix seem to me to increase the likelihood that jailbreaks listed as successful in this paper do in fact contain jailbroken information. I'm also given increasing confidence in the evaluations of this paper by the authors' use of both a word-based classifier that detects refusal strings, and an LLM graded response." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a novel jailbreak framework for optimising audio jailbreaks against audio-language models (ALMs). They overcome challenges in the design of ALMs: namely 1.) they find a dual-phase training process so they can optimise attacks even through discretisation operations in the tokeniser, and 2.) they develop an adaptive search method to find more flexible adversarial targets.\n\nFinally, the authors introduce a realistic constraint on their work: that the audio jailbreaks are stealthy. 
They operationalise this as having human and ALM-based classifiers independently score the audio input for signs that it was adversarially tampered-with. The authors claim (it's hard without hearing audio samples myself) that their jailbreaks are hence indistinguishable from normal urban noise (e.g. a car horn)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While I'm overall very positive on this paper, I'm a little underwhelmed by the baselines. I would expect that the adversarial perturbations of GCG and BEAST to be quite brittle to being converted to spoken text and then fed into an ALM. These are worthwhile baselines to run, but more semantically-natural baselines like AutoDAN would have pushed the paper even further. The authors acknowledge the difficulty and novelty of introducing audio-based adaptive attacks, like transfers of PAIR or TAP: I would have been very excited to see the authors tackle adaptive jailbreaks in the audio domain, but understand why for reasons of cost and difficulty that this might not be feasible - though I am aware of an unpublished audio implementation of PAIR.\n\nI think Fig 1 is quite challenging to parse. I would rather it be simplified quite a lot more before final release. In particular, I think there is too much text annotating Phase II, even if helpful for diving deeper into the method. I would prefer at least a much more abstracted version of the figure, without reference to variables from Equation 1, and with the annotation retooled to explain how the different branches refer to each other. At the moment I think it's too hard to understand without continuous reference to Equation 1, and the figure struggles to explain itself on its own." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Have you thought about measuring how your attacks transfer between models? I’d love to see transferability in your work since the threat model I think is most concerning is people finding white-box attacks on open-source models that transfer to more powerful closed-source models. See examples here: https://arxiv.org/abs/2403.09766 , https://arxiv.org/abs/2407.15211\n\nSmall discussion point on using LALMs. Most of the field uses VLMs for vision language models, so do you think using ALMs would be a better acronym to popularise in the field?\n\nI have weaved most of my questions into the weaknesses section. I think this paper has the potential for a much higher rating (especially given the timeliness of getting attacks working on LALMs, which is a neglected area of the adversarial attack literature), but not in its current form. I am happy to increase my score if the weaknesses I highlighted are addressed." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- **Relevant and timely approach**: LALMs are becoming more prevalent with the recent release of audio capabilities in Gemini and GPT-4o. 
However, to the best of my knowledge, AdvWave is the first work that has successfully got white-box jailbreak attacks to work.\n- **Innovative approach**: AdvWave uses a dual-phase optimisation strategy to address the issue of not being able to backpropagate through the full network when it contains discretisation. They also improve optimisation efficiency by adaptively finding a target string that matches the common structure the model uses for benign requests. These challenges are clearly explained, and the authors provide solutions.\n- **Potential for future research**: AdvWave opens several avenues for future work, including further exploration of defensive mechanisms and applying the framework to LALMs that do not have a discretization bottleneck." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces AdvWave, a framework for conducting white-box adversarial attacks against large audio-language models (LALMs) to elicit harmful information. The authors identify the unique challenges posed by LALMs, such as gradient shattering due to discretisation operations in audio encoders and maintaining stealthiness constraints. To address these issues, AdvWave implements a dual-phase optimisation strategy. The first phase optimises a discrete latent representation to circumvent the gradient shattering issue, while the second phase adjusts the audio waveform itself to align closely with this representation while preserving perceptual naturalness. AdvWave significantly outperforms transferring static jailbreak attacks optimised on text-only LLMs that are subsequently vocalised with text-to-speech (TTS). The authors argue that their approach highlights the need for more robust defenses and safety measures in LALMs." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **AdvWave is not the first approach to jailbreaking LALMs:** The authors of the paper claim that AdvWave is a novel framework for jailbreaking LALMs, but I think this claim needs to be softened throughout the paper. Gemini and GPT4o both have audio-specific jailbreaks, where they vocalise text jailbreaks with TTS in their recent model cards. Therefore, claiming that AdvWave is a novel white-box attack methodology for LALMs is better.\n- **Stealthiness constraints are not well-motivated or measured:** It isn’t clear to me why this stealthiness constraint is needed. Any jailbreak, no matter the stealthiness of the input, is bad. Also, stealthiness constraints are not new; they were used in white-box attacks of speech-to-text (STT) models, and the intro doesn’t explain why LALMs make it more difficult. Papers such as “Imperceptible, robust, and targeted adversarial examples for automatic speech recognition” should be cited. Did you ablate changing the whole audio file rather than just the suffix? What motivated the suffix and the environmental noises? You can have imperceptible changes to the audio without that, I believe. Also, what environmental classifier do you use? This needs to be cited for replication purposes.\n - I’m very confused by the stealth metric and why it is a useful comparison to the baselines. The baselines do not have adversarial suffixes added on; they are just TTS reading a harmful prompt. So why isn’t their S_stealth equal to 1? It should be maximally stealthy, just like the vanilla baseline. Also, the baselines do not have a car horn at the end of the utterance, which could be considered more stealthy than your method. You mention you need high stealthiness so it is less detectable by real-world guardrail systems, but I don’t think the results presented demonstrate this. Also, AdvWave is not superior to vanilla in terms of stealth. 
The three terms in S_stealth are confusing and not well-motivated.\n- **Lack of relevant adaptive black-box baselines:** The paper only compares attacks that are optimised on text-only LLMs that are then transferred into audio with TTS. Using TTS to vocalise GCG attacks might not make sense - there could be lots of tokens that can’t be spoken properly so I would expect the attack to be very weak. You say there are no adaptive attacks due to gradient shattering, but there are plenty of good adaptive black box attacks. I expect PAIR and TAP to work well in the audio domain. AdvWave should be evaluated against much stronger baselines than currently used. How strong are the transfer GCG/BEAST attacks to the base text-only LLM? E.g. what is the GCG ASR on Llama3? That would inform if the baselines transfer to the audio domain effectively or if they are broken by the architectural / fine-tuning differences.\n- **Lack of clarity on LALM architecture differences, what architecture AdvWave is aimed at, and motivation for why dual optimisation to solve gradient shattering is needed:** Not all LALMs have discretisation of audio before input to an LLM (like SpeechGPT). Many insert a continuous vector directly into the embedding space of the model (e.g. DiVA, Llama3.1, Salmonn, Llasm, AudioQwen). Therefore, these won’t have the gradient shattering problem, and the framework in Figure 1 isn’t relevant. There needs to be better motivation and explanation of why AdvWave targets LALMs that have the discrete bottleneck. Ideally, the paper will explain all the different architectures and introduce a framework that works for all the variants. Also, many LALMs do not have a decoder that maps back to audio space. Lots just do audio-to-text. Only a few models are fully speech-to-speech (some are trained end-to-end, and others just put a TTS module on end). It is important to talk about these. 
Furthermore, why can’t you use a straight-through estimator or Gumbel softmax to differentiate through the discretisation instead of the dual optimisation approach? I need more motivation to believe this is necessary.\n - Also, is gradient shattering a well-known term? A quick search gets something different: https://arxiv.org/abs/1702.08591. Perhaps the problem could just be called “Non differentiable audio tokenisation” or similar? I don’t think the dual optimization method is novel, it would be good to find the original paper that implements something like this. Perhaps it would be in the VQVAE literature?\n- **Lack of threat model:** I’d like to see your threat model go into depth more about why you focus on white-box attacks and why you need stealthiness constraints. E.g., you can just apply existing white-box attacks to text LLMs already and get bad outputs; why do we care about LALM defense when text isn’t solved? Isn’t an attack that elicits a harmful jailbreak that isn’t “stealthy” also a success from the red team’s perspective? Why does it need to be understandable? These can be addressed in your threat model. Also, you mention in related work that LALMs shouldn’t be deployed widely if they are not robust, but releasing them as closed source is fine since you can’t attack with AdvWave.\n- **Presentation of equations, figures, and results needs to be polished:**\n - Figure 1: Phase 1 would be nicer on the left. A brief intuition on what each loss is trying to achieve in the caption would be helpful\n - Section 3.2, in general, is very hard to follow along. L_retent is talked about a lot before being explained. Include an intuitive explanation earlier. You introduce the notation for the size mappings of each component, but this makes it more confusing, in my opinion. I would put this in the appendix.\n - Section 3.5 - There is lots of repetition of equations here (e.g. 
equ 7 is the same as 5 and 6 similar to 1), it would be great if it could be folded into the other sections for conciseness\n - I’m not sure what the perk of having ASR-W is in addition to ASR-L. Often, LLMs are still jailbroken if they say, “I’m sorry,” so I’d expect ASR-W to have many false negatives. It would be good to manually check the false positive rate of ASR-L.\n - Figures 2 & 3 need axes labels and should use a color-blind friendly palette (without gradients). Figure 4 has text that is too small.\n- **Related work is majorly lacking citations and doesn’t contrast with AdvWave:**\n - Add related work to white-box attacks on VLMs - your work is very comparable to how people jailbreak VLMs, e.g., https://yunqing-me.github.io/AttackVLM/ , https://arxiv.org/pdf/2306.13213, https://arxiv.org/pdf/2402.02309. Also, vocalising the request is similar to putting typographic text into images (like FigStep, Images are Achilles Heel of Alignment, Jailbreak in pieces)\n - Add related work to white-box attacks on STT models - this is also very relevant, especially the imperceivable constraints. e.g. “Audio adversarial examples: Targeted attacks on speech-to-text”, “There is more than one kind of robustness: Fooling whisper with adversarial examples”.\n - There are many more papers than I provide here, and I’d recommend doing a proper literature review.\n - LALM section - I would cut the section around concerns of misuse. This should be discussed in the intro. You should cite frontier models like Gemini and GPT-4o advanced voice mode.\n - Jailbreak attacks on LLMs section - you should cite https://arxiv.org/abs/2404.02151\n- **Adaptive target search seems overly complicated:** why did optimising just for “sure” as the first token not work? This works in VLM literature. When comparing to optimizing for “sure”, did you use a prompt like in https://arxiv.org/abs/2404.02151? If not, optimizing for “sure” alone may be much weaker. 
I’d expect if you did this, the ASR would increase. Essentially, using an “adaptively search optimisation target,” you find a good starting point, but prompting the model to start the response with “Sure, here is…” might mean you don’t need this component. Also, why can’t you find a target string from another jailbroken LLM even if it has a very different structure to the output of the LALM? Shouldn’t gradient-based approaches still be able to change the model to output this?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In the supplementary materials provided, I am puzzled about adding adversarial noise: 1. The authors mention that the adversarial noise is naturalized using urban environmental sounds as a masking method. However, I can still hear the traditional adversarial disturbances beyond the environmental sounds, suggesting the presence of two types of perturbations, which the paper does not mention. 2. The attack audio samples provided have adversarial disturbances implanted at the end silence segments of the audio, occupying about half the duration of the audio itself. It's unlikely for such a high proportion of silence in most audio datasets, revealing a serious issue: can adversarial attacks unrestrictively zero-pad benign audio ensure attack success? 
This seems to relate to the authors' initial claim that audio attacks on LALMs would limit the optimization search space for adversarial disturbances. I imagine the authors extended the audio to ensure sufficient search space, yet this seems impractical in real situations. 3. I am curious why the adversarial disturbances were added to the silence segments. Semantically rich portions of the audio seem more susceptible to attacks, and placing disturbances in silent parts would make the noise more detectable by human ears." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The work is groundbreaking as it introduces the first jailbreaking adversarial attack on LALMs. The authors have conducted extensive experimental comparisons, particularly by adapting jailbreaking methods from other domains to the audio sector to ensure the superiority of the proposed attack method. The contribution of this paper is indisputable. However, I still have some questions regarding the audio stealthiness mentioned by the authors." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents an innovative adversarial attack method targeting LALMs, marking the first successful attack on LALMs with optimized audio stealth. The efforts are commendable." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I believe the design motivation behind the authors' idea might be flawed. The audio provided clearly contains malicious content, so why consider the stealthiness of the adversarial disturbance? A normal listener would already notice something amiss with the content. 
Adding adversarial noise to silence segments inevitably leads to listeners hearing malicious content followed by eerie noises, which is utterly unconvincing from a realistic perspective. The authors should more reasonably consider the reasons for the stealthiness of adversarial disturbances and integrate them with the application scenarios of LALMs for a rational design." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We develop the first jailbreak framework against large audio-language models." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024advwave,\ntitle={AdvWave: Stealthy Adversarial Jailbreak Attack against Large Audio-Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0BujOfTqab},\nnote={under review}\n}" }, "abstract": { "value": "Recent advancements in large audio-language models (LALMs) have enabled speech-based user interactions, significantly enhancing user experience and accelerating the deployment of LALMs in real-world applications. However, ensuring the safety of LALMs is crucial to prevent risky outputs that may raise societal concerns or violate AI regulations. Despite the importance of this issue, research on jailbreaking LALMs remains limited due to their recent emergence and the additional technical challenges they present compared to attacks on DNN-based audio models. Specifically, the audio encoders in LALMs, which involve discretization operations, often lead to gradient shattering, hindering the effectiveness of attacks relying on gradient-based optimizations. The behavioral variability of LALMs further complicates the identification of effective (adversarial) optimization targets. Moreover, enforcing stealthiness constraints on adversarial audio waveforms introduces a reduced, non-convex feasible solution space, further intensifying the challenges of the optimization process. 
To overcome these challenges, we develop AdvWave, the first jailbreak framework against LALMs. We propose a dual-phase optimization method that addresses gradient shattering, enabling effective end-to-end gradient-based optimization. Additionally, we develop an adaptive adversarial target search algorithm that dynamically adjusts the adversarial optimization target based on the response patterns of LALMs for specific queries. To ensure that adversarial audio remains perceptually natural to human listeners, we design a classifier-guided optimization approach that generates adversarial noise resembling common urban sounds. Extensive evaluations on multiple advanced LALMs demonstrate that AdvWave outperforms baseline methods, achieving a 40\\% higher average jailbreak attack success rate. Both audio stealthiness metrics and human evaluations confirm that adversarial audio generated by AdvWave is indistinguishable from natural sounds. We believe AdvWave will inspire future research aiming to enhance the safety alignment of LALMs, supporting their responsible deployment in real-world scenarios." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "jailbreak", "adversarial attack", "audio-language model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/99d9042de14ea3d96059ae2e2456e96d6ecfe53d.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/69c8e46c0a1fe8e52fa279ed68395137f45d6a44.zip" }, "title": { "value": "AdvWave: Stealthy Adversarial Jailbreak Attack against Large Audio-Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0C5iHPPwsG
Autoencoder-Based General-Purpose Representation Learning for Entity Embedding
main
Active
customer;embeddings;embedding;tabular;general;purpose;autoencoder;representation learning;general purpose;reconstruction loss;entity;entity embedding;entity representation;contractive autoencoder;dimensionality;reduction;latent;space;representation;feature;regularization;variational autoencoder
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;5;5;8
3;3;3;1
2;2;4;3
2;2;2;3
1;1;4;3
5.25
2.5
2.75
2.25
2.25
-0.889297
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Q: Did you all empirically assess how much time this k x d^3 adds time-wise compared with just a standard AE? Its an offline computation, but getting a sense of what that tradeoff comes out to time wise for your datasets would be interesting ( ie, is it that big of a hit in the end since the datasets are all below 45k instances each ). How does KernalPCA perform? \n\nQ: line 138 should probably cite the 2017 Transformers paper and not the 2023 arxiv one\n\nQ: Is there a reason in particular for using tanh activations in your extension of CAE? I get it allows for the decomposition shown, but are there other activations or ablations which could have been performed ?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The authors show how the CAE framework ( an AE with an additional objective component that is the Frobenius norm of the model with respect to the input ) can be extended to a multi-layer setting in a way that is advantageous when compared with prior extensions to CAEs which worked via stacking. 
\n\nThey do an extensive empirical analysis to discuss reconstruction and downstream accuracy benefits of the method while discussing costs of the method (scaling as the method is cubic with respect to layer size ) and its downstream limitations compared to KernalPCA.\n\nThe setting ( encoding tabular data with multi-modal data types ) is important and they show how its not handled readily or generally by Transformer type architectures." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this work the authors extend the Contractive AutoEncoder (CAE) framework for the calculation of the Jacobian \nof the entire encoder in the contractive loss from single-layer to multi-layer settings ( DeepCAE ).\n\n\nEmpirically over tabular benchmarks, the authors show DeepCAE can be leveraged in a general purpose embedding \nframework where embeddings are feed to XGBoost to obtain gains in reconstruction performance and comparable/slightly better performance \ndownstream prediction (classification/regression) performance as compared with various AutoEncoders and Transformer baselines \n( though not when compared with KernalPCA from a downstream performance perspective ). \nThey additionally show the noteable reconstruction performance of DeepCAEs compared with Stacked CAEs ." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) Time comparisons and particularly error bars needed everywhere ( KernelPCA/StandardAE/DeepCAE ). For your benchmarks are you all running multiple seeds per each? \n\nThe main question ( which the authors have pointed out openly in the paper and for future work ) is if the cost of DeepCAEs is worth the effort? They’ve shown reconstruction is slightly better, but for tasks its pretty comparable to AE and KernalPCA does better (still an interesting finding ). How important is reconstruction loss really for this setup? 
What is the time complexity of KernalPCA and outside of reconstruction loss being subpar are there other reasons to not use it?\n\n2) Are there stronger baselines to compare against both encoding wise and classifier wise (ie, XGBoost vs something else) against if what we care about is tabular performance using embeddings? The former is the more important of the two and there is a NeurIPS workshop on fusing modalities for tabular data thats in its 3rd edition (https://table-representation-learning.github.io ) \n\n3) The general purpose embedding pipeline seems like the standard solution to re-using embeddings for downtsream tasks from vector databases and not particular to DeepCAE? Is this the case? If not, it could strengthen the paper to clarify how so if not.\n\n4) It would be interesting to either show experiments on or discuss how DeepCAE does on just image or text data as well to compare its reconstruction and downstream task performance there. Is there anything in particular that makes this approach specific to tabular data with multi-modal data? If the method gives performance boosts when encoding image/timeseries/text, it would greatly strengthen the results of the paper and would make incorporating the method.\n\n5) While the background on CAE was very much needed, the sections on VAE and Transformers were probably lesser so ( or could have been pushed into the appendix ) especially since you show effectively they are not nearly as effective. This space could/should be used for looking more at KernalPCA vs Standard AE vs DeepCAE costs/tradeoffs and potentially other modalities ( point 4)\n\n6) Did you all do experiments against the original CAE? The paragraph starting at 280 made it seem like you would ( and in the final section you do with StackedCAE), but then in the experiments Convolutional AE is used instead which I wasn't expecting. 
I'm assuming this is shown in past work when Stacked CAEs are introduced but having a sense of that as well would be good since its much cheaper computationally than both Stacked and Deep CAEs." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 1 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In the existing regularization strategies, have other methods been considered, such as dropout or data augmentation? These techniques have been proven effective in preventing overfitting." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This paper introduces DEEPCAE, a multi-layer contractive autoencoder designed for general-purpose entity embedding. By extending the contractive autoencoder framework to multiple layers while preserving regularization, DEEPCAE overcomes limitations seen in stacked CAEs, opening new possibilities for autoencoders with high-dimensional data. The study is thorough, with DEEPCAE evaluated across 13 diverse datasets, showing consistently strong results in both reconstruction and downstream tasks that highlight its effectiveness. The paper is well-organized, with clear derivations and detailed appendices on model architecture and hyperparameters to ensure reproducibility. 
Overall, DEEPCAE offers an efficient, versatile solution for embedding across domains, reducing feature engineering time and adding practical value for cross-application embeddings in industrial settings." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces DEEPCAE, a versatile entity embedding framework based on autoencoders. By extending contractive autoencoders (CAE) to a multi-layer structure and preserving the original regularization design, DEEPCAE enhances both reconstruction accuracy and downstream prediction performance for complex entity embeddings. In tests across 13 datasets, DEEPCAE consistently outperformed other autoencoder variants in both reconstruction error and predictive tasks, achieving a 34% reduction in reconstruction error compared to a stacked CAE. This framework offers an efficient, scalable solution for general-purpose entity embeddings across diverse domains, ultimately reducing time spent on feature engineering and boosting model accuracy." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "DEEPCAE demonstrates impressive results with contractive regularization, but it may not have explored other well-established regularization techniques, like dropout or data augmentation, which are effective in preventing overfitting. It would be beneficial for the authors to consider incorporating these strategies into the DEEPCAE framework. Doing so could enhance the model's robustness, and evaluating their impact on performance in future experiments would provide valuable insights into improving its effectiveness." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Please see the weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The extension of CAE for multi-layer setting is simple yet effective. \n\nThe ‌authors conduct‌ experiments across 13 datasets and ‌cover‌ various types of entities.\n\nThe results demonstrate state-of-the-art performance on both reconstruction and ‌downstream‌ prediction." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes DeepCAE for learning general-purpose entity embeddings. Although DeepCAE extends from the contractive autoencoder, the authors provide a more effective design in calculating the multi-layered regularization term. Extensive experiments across 13 datasets demonstrate state-of-the-art performance of DeepCAE on reconstruction and ‌downstream‌ prediction tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main paper should be self-contained. The authors may overly refer to the ‌original‌ CAE paper.\n\nThe motivation of DeepCAE and CAE is not clearly introduced. 
It is ‌confusing‌ for me why ‌they are designed for tabular data, and how ‌they are connected‌.\n\nIn the experimental results, the strengths of DeepCAE are not significant compared with the standard AE." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. The argument \"*Consequently, these representation methods not only are limited, but also fall short or are inapplicable ...*\" in line 47-48, do you have evidence to support this claim, such as references or experimental results? Also, what do \"*these representation methods*\" refer to?\n2. In the following paragraph starting at line 50, how are the two contributions related? The authors might consider clarifying that the framework is based on their proposed method.\n3. In section 2, what is the purpose of elaborating on PCA, variational autoencoders, and transformers? If they only serve as baselines, a brief introduction in the experimental setup might suffice. Instead, the focus should be on elaborating CAE in this section on its principle and structure. For examples, the steps to encode the input and decode it, the loss function.\n4. Eq. (1) lacks detailed description, such as the meaning of its symbols.\n5. 
In line 92-93, \"*Thanks to their ability to produce stable and robust embeddings, CAE were proven to be superior to Denoising Autoencoders (DAE)*\", the real reason behind *produce stable and robust embeddings* and *superior performance* is missing. The authors should make this argument more well-founded.\n6. In the first paragraph of section 4.1, the sentence \"*we analyze related work and find that, to the best of our knowledge, all use stacking Wu et al. (2019); Aamir et al. (2021); Wang et al. (2020), including Rifai et al. (2011b), who originally proposed the CAE.*\" is poorly written. Additionally, what kind of \"stacking\" was used? The authors should elaborate on how previous works implemented this.\n7. In line 223, how is the conclusion \"$O(d_x \\times d_h^2)$ to $O(d_x \\times d_h)$\" derived? I cannot deduce this from Eq. (3) alone. And \"*and $d_h$ is the hidden embedding space.*\", do you mean $d_h$ is the dimension of the hidden embedding space?\n8. The explanation of how Eq.(4) is obtained from Eq. (3) is unclear. The entire inference process from Eq. (3) to Eq. (8) lacks coherence.\n9. In line 274, \"*such as text and time-series*\" what kind of information is the \"text\" and \"time-series\" exactly? Could you provide examples?\n10. In Figure 1, how exactly do you concatenate the text encoding, TS encoding, and tabular data together?\n11. In section 5, what are the experimental settings, such as the number of encoder layers and feature dimensions? Additionally, the dataset statistics should be included in the main paper rather than the appendix, as they are important.\n12. How is the Mean Squared Error (MSE) computed? Is this metric conventionally used in previous work?\n13. In the first paragraph of section 5.1.2, \"*We trained XGBOOST (Chen & Guestrin, 2016) predictors ...*\", what is the XGBOOST model, and what exactly is the downstream task?\n14. 
In section 7, line 522-524, \"*Furthermore, we argue that the augmentative capabilities of more complex architectures like Transformers and CNNs are not necessarily useful in the production of a compact representation of an entity.*\", do you have evidence to support this argument?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The authors conducted sufficient experiments to validate the effectiveness of their proposed method. They also provide code and data to reproduce the results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a method called DeepCAE to calculate the regularization term for multi-layer contractive autoencoders and utilizes DeepCAE to power a general-purpose entity embedding framework. The experimental results show that DeepCAE outperformed the baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I think the prominent issue is the writing. 1) It **contains lots of tangential content** which is unnecessary to elaborate on in this paper. For example, in Section 2, I don't see the point of discussing PCA, variational autoencoders, and transformers, as they are not directly part of or foundational to your methodology. 2) The writing **lacks substantial details and is not self-contained**. This work is largely based on the previous CAE work by Rifai et al. (2021b), but the introduction to that work is incomplete, making it difficult to understand the proposed method and its details. Instead, the authors frequently refer readers to the original work. This problem also exists in the experiment, which did not clearly present the experiment settings. 3) **Lacking of logical coherence**. 
Some important arguments lack evidential support and are not clearly described.\n\nPlease see questions below for exact points." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce DeepCAE, an enhanced multi-layer contractive autoencoder, and benchmark autoencoder architectures in a general-purpose tabular data embedding framework for reconstruction and downstream performance, with a 34% reconstruction improvement." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024autoencoderbased,\ntitle={Autoencoder-Based General-Purpose Representation Learning for Entity Embedding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0C5iHPPwsG},\nnote={under review}\n}" }, "abstract": { "value": "Recent advances in representation learning have successfully leveraged the underlying domain-specific structure of data across various fields. However, representing diverse and complex entities stored in tabular format within a latent space remains challenging.\nIn this paper, we introduce DeepCAE, a novel method for calculating the regularization term for multi-layer contractive autoencoders (CAEs). Additionally, we formalize a general-purpose entity embedding framework and use it to empirically show that DeepCAE outperforms all other tested autoencoder variants in both reconstruction performance and downstream prediction performance. Notably, when compared to a stacked CAE across 13 datasets, DeepCAE achieves a 34% improvement in reconstruction error." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "customer", "embeddings", "embedding", "tabular", "general", "purpose", "autoencoder", "representation learning", "general purpose", "reconstruction loss", "entity", "entity embedding", "entity representation", "contractive autoencoder", "dimensionality", "reduction", "latent", "space", "representation", "feature", "regularization", "variational autoencoder" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/82d6ecb8781f42a98cd47903bf699fdc72d681e0.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/20d1da13784f315c8ad4e8924a1cfa9107f69b98.zip" }, "title": { "value": "Autoencoder-Based General-Purpose Representation Learning for Entity Embedding" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0CieWy9ONY
Scene Flow as a Partial Differential Equation
main
Active
Scene Flow;Neural Prior;Partial Differential Equation;Reconstruction
applications to computer vision, audio, language, and other modalities
6;6;6;8
4;4;4;3
3;3;3;3
3;2;3;3
2;3;2;3
6.5
3.75
3
2.75
2.5
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- The discussion of the different activation functions (appendix) is indeed interesting. And this could be one of the interesting parts of the ablation study. However, it is strange to see that using the Gaussian non-linear function is yielding very bad performance. Perhaps the spectral width needs to be fine-tuned, especially when the distribution of the lidar scene flow is very unique.\n\nPlease also see the above section for detailed comments." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper proposes an interesting idea to represent the scene flow as a velocity field using a neural network, making it very easy to combine the temporal information (time) and the spatial information (position of points).\n- The authors have done extensive analysis of the proposed method, and have shown different ablation studies to validate the effectiveness of the method.\n- The proposed method also shows the potential to deal with small objects and emergent flows in robotics scenarios, which could be interesting when applied to highly dynamic environments.\n- Overall, the writing of the paper is clear, and the visualization is easy to understand." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes to represent scene flow as a velocity field using a neural prior. Instead of prior art that directly represents per-pair scene flow as neural prior, the authors alternatively propose to use neural prior to model the partial differential equation (PDE) of the position of the point versus the time interval. This novel velocity representation is interesting and could offer flexibility in dealing with long-term sequences of flow estimations as the authors described in the paper. The authors have also done extensive analysis of the proposed method on Argoverse 2 (and Waymo) datasets, comparing the performance with recent scene flow works, and validating the good performance of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- When using the time interval between [-1, 1] for the time encoding, will the proposed method not be able to handle time step outside the range? Given that the representation is a continuous neural network, how does it extrapolate to a longer sequence with the current representation?\n- When comparing with a method like NSFP, I wondered if the authors could show the results of pure Euler integration of the method and highlight the benefits of wrapping a PDE with a neural network.\n- The authors mentioned that they only do sequences of length 20, I wondered if the method failed rapidly with the increase of the sequence length. It would be interesting to show an even longer sequence to highlight the arbitrary time query property of the proposed method.\n- I feel like the authors want to talk about too many things in this paper, so they may overlook the most important part of the method. This method is good at dealing with long-term flow trajectory and has the potential to better capture the small, highly dynamic objects in the scene. 
The authors could reorganize the motivations and experiments to highlight the advantages of the proposed method." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Overall I believe this paper is in a good shape, the authors discuss the properties and limitations of the proposed method thoroughly in the paper. I have a few more questions:\n\n- How does the method handle scenes with deformable objects?\n- What is the impact of temporal sampling rate on performance?\n- How does the point cloud density affect the performance?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- I'm not up-to-date to the latest scene flow models, but from the results in the paper it surpass the prior art by a large margin, which is very significant\n- Introducing the concept of modeling scene flow as a PDE is innovative and offers a new direction for research in motion estimation.\n- The method is rigorously developed, with comprehensive experiments and ablation studies that validate the approach.\n- The paper is well-written, with clear explanations and effective use of figures to illustrate key points." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel approach to scene flow estimation by reframing it as the task of estimating a continuous space-time partial differential equation (PDE) that describes motion across an entire observation sequence. The proposed method, called EulerFlow, utilizes a neural prior to represent this PDE and optimizes it against multi-observation reconstruction objectives. This approach allows for high-quality, unsupervised scene flow estimation on real-world data, outperforming existing supervised and unsupervised methods, particularly on small and fast-moving objects like birds and tennis balls. The authors demonstrate the effectiveness of EulerFlow on datasets such as Argoverse 2 and Waymo Open Dataset, and show its applicability across different domains without domain-specific tuning. Additionally, they highlight emergent 3D point tracking behavior as a result of solving the estimated PDE over long time horizons." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- As stated in the paper, the speed of the proposed method is a big concern, preventing it from deploying on real world application.\n- Some hyperparameters, such as the depth of the neural prior, seem to require dataset-specific tuning (e.g., increasing depth to 18 for the Argoverse 2 challenge), which may affect the method's out-of-the-box applicability.\n- It would be great if the author could show more failure cases to help readers better understand its limitations." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Among all the three points I illustrated in the weakness, \n\nFor point 1, I hope the authors can provide concise update on their paper title and contributions in particular for the first bullet time (line 99-100).\n\nFor point 2, the current evaluation is sound and maybe sufficient for this paper. I do believe it is nice to more evaluation on non-AV dataset quantitatively that very likely will benefit this method as a baseline for future work in different domain. \n\nFor point 3, it will be good if the author can provide specific example (as a figure)" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The proposed scene flow representation is simple and technical sound. Compared to prior work NSFP, extending it to multi-frame and learns a bi-directional consistency scene flow is a very intuitive step forward. \n2. The performance of this method (both qualitative and quantitative) is impressive. The method can learn very consistent scene flow in trajectory despite not explicitly considering common issues such as occlusion artifacts. As the paper demonstrated, it can tackle well on small objects (with potentially large motions as well)." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a neural representation to optimize scene flow as a discrete partial differential equation of scene geometry over time. Compared to previous method, Neural Scene Flow Prior (NSFP), a method is most related to this work in the neural representation, the proposed method introduces a multi-frame formulation and learns bi-directional three-step Euler integration of the geometry consistency using decoded per-frame scene flow. Compared to previous work, the proposed representation can achieve better performance in Autonomous driving datasets and the authors demonstrate qualitative performance on depth camera input as well." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper title and introduction is very general and does not provide a precise position of this paper's main contribution. \"Scene flow a partial differential equation\" has been historically formulated long time ago in many prior paper, e.g. [1] as one examplar reference, and it has been proposed as a continuous representation in one early seminal work [2]. Many related work studied this optimization problem using images input and solved it using differential optimization approaches before. In this paper seems only consider related work in the point cloud space, and beneficially solved in using a neural representation. I will suggests to more precise position their scope and contributions in paper title, introduction and contributions. \n\n2. The evaluation dataset in this paper is mostly on autonomous driving datasets though as the method demonstrated, it should also work on other data domain when depth is available. 
Though real world depth and flow ground truth is hard to get, it won't be too hard if evaluated using a more general synthetic dataset that provide different motion patterns, compared to the type of motion and accuracy that autonomous driving dataset can provide. \n\n3. The paper has already discussed the main limitations it section 6.1. Particularly for the last point \"EulerFlow does not understand ray casting geometry\", it was clear how this has been demonstrated in the current results. It will be good if the authors can provide examples and metrics that reflect the challenge in this aspect.\n\n[1] A Variational Method for Scene Flow Estimation from Stereo Sequences, Frederic Huguet and Frederic Devernay, ICCV 2007\n\n[2] Three dimensional scene flow, Vedula et al, CVPR 1999" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Here are some questions and concerns regarding the presentation and the method:\n\n1) In lines 189 and 195 $\\frac{\\partial L^*}{\\partial t}$ is referred to as the partial differential equation, or a PDE. However, $\\frac{\\partial L^*}{\\partial t}$ alone is not a PDE yet, unless it is set equal to something (as in equation 2).\n2) I guess that in equation 2 SFvPDE should also depend on $x$. Could the authors clarify this?\n3) In general, it would be nice to have more formal definitions. E.g. 
in EulerFlow, an exact formula for solving the PDE, $\\text{Euler}_\\theta(P_t, d, k)$, would improve understanding and reproducibility of the method.\n4) In principle, the PDE can be integrated in both directions by simply reverting the time. The usage of the direction as an extra argument in the model makes the connection between sections 3 and 4 slightly weaker and seems to be a legacy design choice from NSFP. Thus, a question to the authors is whether they have tried training without the direction argument?\n5) Given the high computational complexity of the method, it would be better to see some implementation details on how exactly equation 3 is calculated during training. Are any optimizations already incorporated? E.g. in the current form separate terms in the loss are independent. However, I believe that subsequent Euler steps can use previous estimates instead of recalculating them.\n6) More ablation studies would better highlight the contributions of the paper. E.g. how general and how sensitive is the method to different numerical solvers and sizes of discretization steps? Have the authors tried higher-order PDE solvers or using $\\Delta t$ smaller than the time between observations?\n\nI will adjust my score based on the other reviews and the rebuttal by the authors." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The method is simple and intuitive yet effective. Extensive experiments section shows clearly that the proposed method surpasses prior work." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes SFvPDE, a framework to cast scene flow estimation as a PDE with a neural prior, and EulerFlow, an example demonstrating how SFvPDE can be trained using the Euler method to locally integrate the PDE during training. 
A space-time-dependent vector-field is trained to match subsequent point clouds at different timestamps via solving the underlying PDE. The method significantly outperformes both supervised and unsupervised baselines and is especially effective on small objects compared to prior work." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main weakness of EulerFlow, as also noted by the authors (lines 524-528), is the time it takes to converge on a single scene. But given the performance of the method, this should not be considered critical. However, the presentation of the paper can be improved, the paper lacks some implementation details, as from time to time the reader has to guess what is actually happening (see questions section)." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We model scene flow as an estimating a PDE over many observations; our unsupervised method is high quality (SotA on important benchmarks) and works out-of-the-box on many diverse domains." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024scene,\ntitle={Scene Flow as a Partial Differential Equation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0CieWy9ONY},\nnote={under review}\n}" }, "abstract": { "value": "We reframe scene flow as the task of estimating a continuous space-time PDE that describes motion for an entire observation sequence, represented with a neural prior. Our method, _EulerFlow_, optimizes this neural prior estimate against several multi-observation reconstruction objectives, enabling high quality scene flow estimation via pure self-supervision on real-world data. EulerFlow works out-of-the-box without tuning across multiple domains, including large-scale autonomous driving scenes and dynamic tabletop settings. 
Remarkably, EulerFlow produces high quality flow estimates on small, fast moving objects like birds and tennis balls, and exhibits emergent 3D point tracking behavior by solving its estimated PDE over long-time horizons. On the Argoverse 2 2024 Scene Flow Challenge, EulerFlow outperforms _all_ prior art, surpassing the next-best _unsupervised_ method by more than 2.5x, and even exceeding the next-best _supervised_ method by over 10%." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Scene Flow", "Neural Prior", "Partial Differential Equation", "Reconstruction" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/e02966061f51590650ee53716d5eba9c7c74b46f.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/627bb7b18ca6132faec34f993b20b47575f5ef5d.pdf" }, "title": { "value": "Scene Flow as a Partial Differential Equation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0CtIt485ew
Brain-inspired continual pre-trained learner via silent synaptic consolidation
main
Active
Continua learning; Silent synapse; Pre-trained model; neuroscience-inspired method
applications to neuroscience & cognitive science
3;3;5;5
4;4;4;3
2;2;3;2
2;3;2;2
2;2;3;2
4
3.75
2.25
2.25
2.25
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Minor:\n1.\tThe type function used for S_t is not specified. I suppose it is a step function, if so could you please confirm and specify?\n\n2.\tWhere does the name \"Artsy\" originate from? Is it an abbreviation for \"ARTificial Synapse\"? If so, could you specify this in the paper?\n\n3.\tCan the Artsy framework work in other continual learning settings beyond class-incremental learning (CIL)?\n\n4.\tHow are both the pre-trained network and the initialized subnetworks analogous to the mature brain network? Could you provide examples and references of networks and subnetworks coexisting and connected in the mature brain, but with different dynamics?\n\n5.\tWhat is the rationale behind naming the connections \"artificial synapses\"?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper presents an interesting and original idea by leveraging biological mechanisms of learning to enhance AI models, specifically focusing on the activation mechanisms of silent synapses through spike-timing-dependent plasticity. 
Given that the biological brain exhibits minimal effects of catastrophic forgetting compared to AI models, seeking inspiration from neurobiological learning mechanisms is a promising and innovative research direction. While initial explorations exist in the literature, there remains substantial room for further innovative research in this area.\n\n2. Although some additional details are necessary for a complete explanation and reproducibility of the experiments, the authors have made a commendable effort in describing the framework by providing both training and inference algorithms and biological motivation.\n\n3. The results, while needing a few more details for complete clarity, appear promising and suggest that the Artsy framework outperforms conventional methods on class-incremental learning tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces the Artsy framework, which enhances continual learning in pre-trained models by mimicking the activation mechanisms of silent synapses via spike-timing-dependent plasticity observed in biological neural networks. The framework maintains memory stability for previously learned knowledge in the pre-trained network while promoting learning plasticity in task-specific sub-networks during training. During inference, it uses artificial silent and functional synapses to connect pre-synaptic neurons in the pre-trained network with post-synaptic neurons in the sub-networks, enabling effective information extraction. Experimental results show that Artsy outperforms conventional methods on class-incremental learning tasks and offers better biological interpretability than other solutions to mitigate catastrophic forgetting." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "•\tThere are other literature works that propose biologically inspired solutions to mitigate catastrophic forgetting (see below), which are not covered in the background nor related work sections. I suggest adding these references to provide a more comprehensive context for the proposed framework in the neuroscientific context.\n\n•\tSome parameters needed for reproducibility of the results (incl. number of parameters, type of connectivity e.g. for silent and functional synapses) are not reported. For instance, the paper does not mention how many artificial synapses are used for each subnetwork or whether there is e.g. all-to-all vs sparse connectivity.\n\n•\tThe study mentions limitations of other algorithms in the background section regarding efficiency and computational time. However, the authors do not discuss these features of the Artsy framework compared to other algorithms. Efficiency, model complexity and computational time are important aspects that the authors should quantitatively analyze (or at least provide estimates for) to explain the performance vs efficiency tradeoff.\n\n•\tA potential limitation of the framework is the potential increase in the number of subnetworks and artificial synapses with the addition of more classes. This could pose scalability issues and raise questions about the biological plausibility of the framework. I suggest that the authors provide more information and comments on this aspect.\n\n•\tThe paper does not provide a link to the code, which is essential for reproducibility and further validation of the results.\n\n•\tStandard deviations for the results in Table 1 and Figure 3 and 4 are not shown. These are important for comparing the variability of the frameworks. 
In addition, the experimental setup lacks clarity regarding the number of runs averaged (for instance, for Figure 4B).\n\n•\t“Good” and “bad features” (sec 4.5) are not clearly defined\n\n•\tThe paper makes a claim that brain lesions causing synaptic disconnections can lead to dementia by disrupting synaptic consolidation but lacks references to support this. Moreover, the connection between artificial synapses of the Artsy framework and brain lesions needs to be made clearer.\n\n•\tMore targeted explanations of AMPA and NMDA receptors are needed, for example if the relevance of these receptors to short- vs long-term plasticity is related to the dynamics of functional and silent synapses \n\n•\tThe diagram for Figure 2C could show more than one subnetwork to accurately represent the architecture.\n\nIf the above points are clarified, I am happy to revise my score.\n\nSuggested references (non exhaustive):\nhttps://arxiv.org/pdf/2405.09637 , https://www.nature.com/articles/s42256-023-00747-w , https://www.nature.com/articles/s41467-022-29491-2 , https://arxiv.org/pdf/2403.13249 , https://proceedings.mlr.press/v232/madireddy23a.html , https://pubmed.ncbi.nlm.nih.gov/37589728/ , https://proceedings.mlr.press/v162/gurbuz22a/gurbuz22a.pdf" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How $thr_t$ is selected? Is it learnable?\n2. What is $h$ in Eq. (5)? Is it $h_0$?\n3. 
How is $S_t$ optimized (LINE 283)?\n4. How often $m_t=0$ or $m_t=1$? \n5. What does `complete the prototypes for former classes' mean?\n6. How was it determined that the sub-network is trained for 20 epochs and the artificial synapse is trained for 2 epochs?\n7. What is a *good feature*? What is a *bad feature*?\n8. It would be interesting to see the performance on class incremental learning when only the pre-trained model is used. Are such experiments available?\n9. Will the code be publicly available?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "**Originality:** An innovative biologically inspired approach to avoid catastrophic forgetting while using pre-trained models for incremental class learning is presented. The emulation of silent and functional synapses to artificial networks is appraising and novel in the context of pre-trained models.\n\n**Quality:** A solid background on the biological foundations necessary to understand the composition of the Artsy framework is given.\n\n**Clarity:** The flow of the text is easy to follow. The objective of the paper is clearly stated. All necessary background is given to understand the presented approach. The experimental setup is explained in detail.\n\n**Significance:** The presented work is significant to advance the research in the domain of continuous/lifelong machine learning. The biological inspiration highlights this." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the problem of catastrophic forgetting of pre-trained models by presenting an architecture for class-incremental learning.\nThe architecture, the Artsy framework, is inspired by the plasticity of neurons in the brain. \nArtsy simulates silent and functional synapses. 
Specifically, (1) the fixed pre-trained network acts as a consolidated memory, (2) the sub-network learns the features of new incrementally available data, (3) artificial synapses interconnect the pre-trained network and sub-networks. \nHere, artificial synapses represent the silent and functional synapses.\nThe experimental results show that Artsy achieves superior performance on incremental learning tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Inconsistencies in formulas** There are some inconsistencies between equations and the presented algorithms. For example, LINE 230, Eq. (1) defines $h_0 = E_0(x)$, while LINE 272, Algorithm 1, uses $F(x)$ and LINE 326, Algorithm 2, uses $E_0(x)$.\nLINE 323 states $E_0(x) + \\sum_{i=1}^{t}E^i(x) * m_i$ which is different from the expression within the parentheses in Eq. (5). Equation (3) and (4) are not explained enough. For example, what is the purpose of $m_t$ in general (apart from determining whether a synapse is silent or functional) and how $c_t$ is learned?\n\n**Weak ablation study**\nThe ablation study uses two different types of features as input to test the performance. For the ablation study per se, for example, it would be meaningful to see the separate contribution of $E_0(x)$ and $\\sum_{i=1}^{t}E^i(x) * m_i$ to the performance on the class incremental learning task.\n\n**Limited related work** While related work on silent and functional synapses and other approaches for class incremental learning is thoroughly presented, the related work on similar biologically inspired architectures is missing. Here, the comparison with other biologically inspired approaches would be beneficial. As an example, [1] can be considered.\n\n[1] German I. Parisi, Ronald Kemker, Jose L. Part, Christopher Kanan, and Stefan Wermter. Continual lifelong learning with neural networks: A review. Neural Netw., 113(C):54–71, May 2019." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see comments for \"weaknesses\"." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The authors propose an automatic gating process that can turn on or off individual adaptors depending on present inputs. For each adaptor, a distinct MLP is trained to predict if a present input is an in-distribution example. During inference, adaptors are activated only when the corresponding MLPs predict “match”. Consequently, if all MLPs are perfectly trained and are 100% accurate, only a single adaptor trained for a present input will be activated, and other adapters will be shut down, which means that we can expect a highly accurate prediction." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors proposed a continual learning system inspired by dynamical switching between silent and functional synapses in the brain. The actual mechanisms, however, has no real link to biological synapses that is extensively discussed in their study. Instead, their algorithm is a variation of the earlier algorithm referred to as ‘EASE’ in this study. 
EASE uses a pretrained encoder (visual transformer) as a backbone and trains adaptors to learn down-stream tasks. As each adaptor learns a new distinct task, catastrophic forgetting can be avoided." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "This proposed gating process is interesting, but the authors’ own comparison to EASE show that its advantage is marginal. As they used two simple tasks (CIFAR100 and TinyImageNet) to evaluate the newly proposed algorithm, it remains unclear whether the proposed gating mechanism is beneficial for more complex tasks.\n\nAs MLPs need to be trained with old and new data, the algorithm proposed in this study requires a type of replay memory, which is not clarified in the paper.\n\nThe authors’ description of the model (e.g., encoder (E_t(x)), prototypes and MLP) also needs improvements. Their study is based on Zhou et al (2024) study that proposes EASE, so the details of their study may overlap with Zhou’s study, but this does not mean that they do not need to explain their algorithm. They should extend and improve the description of their proposed algorithm for better readability." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Although the methods section of this article is described very clearly, many details are not introduced. 
For example, the pre-trained network E0(⋅).\n2. The author needs to explain the mathematical mechanisms underlying artificial synapses.\n3. The experiments are too weak, relying solely on two commonly used datasets (CIFAR-100 and TinyImageNet).\n4. The author should include additional experiments that provide interpretability to highlight the advantages of the biological mechanisms.\n5. The author should also provide some efficiency metrics to demonstrate the superiority of the model." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The author's approach to constructing a pre-trained model inspired by the activation mechanisms of silent synapses is commendable.\n2. The overall readability of the article is strong and easy to follow.\n3. The use of artificial silent and functional synapses establishes precise connections between networks, enhancing the extraction of relevant information during inference." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduced the Artsy framework, designed to enhance the continual learning capabilities of pre-trained models, addressing their vulnerability to catastrophic forgetting when incrementally trained on new tasks. Using their framework, the authors are able to achieve state-of-the-art performances on class-incremental learning tasks. Furthermore, this framework offered a promising avenue for simulating biological synaptic mechanisms." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The overall experiments have some shortcomings, as only two common datasets, CIFAR-100 and TinyImageNet, were used.\n2. 
Although the authors emphasize biological synaptic mechanisms throughout the paper, corresponding results are not observed in the results section.\n3. The authors mention that pre-trained artificial neural networks lack generalization capabilities, but they do not conduct corresponding experiments to address this issue.\n4. We would have appreciated a more detailed exploration of how the authors intend to enhance the model based on synaptic mechanisms, accompanied by a mathematical description of these processes. Regrettably, the current explanation remains overly simplistic." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024braininspired,\ntitle={Brain-inspired continual pre-trained learner via silent synaptic consolidation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0CtIt485ew},\nnote={under review}\n}" }, "abstract": { "value": "Pre-trained models have demonstrated impressive generalization capabilities, yet they remain vulnerable to catastrophic forgetting when incrementally trained on new tasks. Existing architecture-based strategies encounter two primary challenges: Firstly, integrating a pre-trained network with a trainable sub-network complicates the delicate balance between learning plasticity and memory stability across evolving tasks during learning. Secondly, the absence of robust interconnections between pre-trained networks and various sub-networks limits the effective retrieval of pertinent information during inference. In this study, we introduce the $\\textit{Artsy framework}$, inspired by the activation mechanisms of silent synapses via spike-timing-dependent plasticity observed in mature biological neural networks, to enhance the continual learning capabilities of pre-trained models. 
The Artsy framework integrates two key components: 1) During training, the framework mimics mature brain dynamics by maintaining memory stability for previously learned knowledge within the pre-trained network while simultaneously promoting learning plasticity in task-specific sub-networks. 2) During inference, artificial silent and functional synapses are utilized to establish precise connections between the pre-synaptic neurons in the pre-trained network and the post-synaptic neurons in the sub-networks, facilitated through synaptic consolidation, thereby enabling effective extraction of relevant information from test samples. Comprehensive experimental evaluations reveal that our model significantly outperforms conventional methods on class-incremental learning tasks, while also providing enhanced biological interpretability for architecture-based approaches. Moreover, we propose that the Artsy framework offers a promising avenue for simulating biological synaptic mechanisms, potentially advancing our understanding of neural plasticity in both artificial and biological systems." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Continua learning; Silent synapse; Pre-trained model; neuroscience-inspired method" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b07e76de0d2f605fe68807d9d8cc11a53e2574f8.pdf" }, "presentation": null, "primary_area": { "value": "applications to neuroscience & cognitive science" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Brain-inspired continual pre-trained learner via silent synaptic consolidation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0CvJYiOo2b
Revisiting PCA for Time Series Reduction in Temporal Dimension
main
Active
principal component analysis (PCA);time series classification;time series forecasting;time series extrinsic regression
learning on time series and dynamical systems
3;3;5;5
5;4;2;4
2;1;3;2
2;2;2;2
2;3;3;3
4
3.75
2
2
2.75
-0.688247
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Q1. The discussions provided in section 3.2 are not really theoretical analysis. The section title is a bit misleading. I would suggest to either break this section down to the motivation of the work, or rename it to some candidates like intuitional justification.\n\nQ2. In line 340, why only 5 datasets from the UEA is selected, out of 30+ multivariate datasets? Also, the five datasets are finally precessed into univariate datasets, in which case why the original 100+ univariate datasets are excluded?\n\nQ3. The accuracy reported in table 2 is pretty low on the first two datasets. And the differences with or without PCA are huge on some cases, e.g., FEDformer and TimesNet on SelfRegulationSCP1. Could the authors provide justification for these numbers? Otherwise, this would damage the versatility of the proposed approach.\n\nQ4. What is the backbone model for the results in Table 5?\n\nQ5. I am not sure if I fully understood line 302-304, “For example, if all positive trends…” Could the authors further explain a bit on this for me? Thanks!" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "S1. 
This paper attempts to improve the efficiency of time series analysis tasks, which can be very useful for resource-constrained scenarios including edge computing.\n\nS2. The evaluation was conducted on three different tasks, i.e., time series classification, forecasting, and extrinsic regression, demonstrating the versatility of the proposed methods.\n\nS3. The paper is easy-to-parse." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This manuscript revisits Principal Component Analysis (PCA) to explore its utility in reducing the temporal dimension of time series data, as a novel area of focus, because PCA has traditionally been applied mostly on the variable space. The paper posits that PCA, when applied to sliding series windows, not only maintains model performance but also enhances computational efficiency. Extensive experiments across time series classification, forecasting, and extrinsic regression tasks substantiate these claims. The paper suggests that PCA preprocessing results in reduced training and inference times without compromising on model effectiveness across various deep learning-based time series analysis models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "W1. It is still hard to conclude from this paper that PCA before feeding into deep neural networks is a versatile solution that should be suggested to time series analysis tasks under resource constraints. Briefly speaking, neural networks, especially the early layers of neural networks, are considered as feature extraction as well as dimension adjusting. This overlaps a bit with the purpose of PCA. \n\nW2. There are more dimensional reduction techniques for time series or high-dimensional vectors, rather than PCA itself. For example, DWT, FFT, etc. Although have been briefly discussed, it would still be very important to compare PCA with other deep model-agnostic dimension reduction techniques." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Q1. Could you please include a comparative analysis section in their paper, directly comparing PCA's performance, computational efficiency, and memory usage against the methods you mentioned (T-Loss, TS2Vec, and autoencoder-based approaches). This would provide a clearer context for evaluating PCA's contribution relative to recent advances in the field.\n\n- Q2. Coud you please include state-of-the-art classification models like InceptionTime and ResNet in their comparison for the classification tasks, providing a stronger baseline for evaluating PCA's impact.\n\n- Q3. I Believe that it would be valuable to expand the forecasting experiments to include additional widely-used datasets such as traffic and electricity, alongside the ETT datasets. Suggest specific datasets that are commonly used in the field for benchmarking." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- S1. I think it is interesting to use unsupervised learning as a first step to reduce memory and computation time for downstream supervised tasks. This approach could be particularly beneficial when dealing with large amounts of time series data with numerous timestamps.\n\n- S2. 
The paper is well written and easy to follow, making the concepts and methods presented clear and accessible to the reader.\n\n- S3. The experimental results show that PCA accelerates both the training and inference processes while reducing the GPU memory usage for the considered downstream tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores the application of Principal Component Analysis (PCA) for dimensionality reduction of the temporal dimension. The authors argue that PCA's ability to reduce dimensionality enables the extraction of essential features underlying time series, thereby improving the efficiency of downstream tasks. Experimentally, the study applies forecasting, classification, and extrinsic regression tasks to the PCA-learned representations. The results show significant improvements in computational time and memory consumption compared to purely supervised approaches applied directly to the raw time series." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- W1. A significant weakness of the paper is its lack of discussion and comparison with other representation learning methods. \n - Several claims in the paper appear to be inaccurate, such as: \"To the best of our knowledge, there has been no systematic method for compressing time series data in the temporal dimension while preserving critical information\" and \"far less attention has been given to reducing the temporal dimension, despite the potential benefits of alleviating the burdens associated with processing long time series.\" In recent years, various unsupervised time series methods have effectively addressed this issue. For example, T-Loss [1] was one of the first models to fully compress the temporal dimension by leveraging contrastive learning and an encoder-only architecture. 
Another contrastive method, TS2Vec [2], learns representations that can be used for forecasting and classification in subsequent stages. Additionally, methods based on autoencoders with vector quantization [3,4] have demonstrated the ability to compress the temporal dimension by learning the core features of time series data. \n - The use of PCA representation does not appear to enhance the performance of the supervised model. While the authors argue that PCA representation accelerates training and inference (and reduces memory usage), the omission of other representation learning methods—such as a basic convolutional encoder-decoder—makes it difficult to fully evaluate the contribution of this paper.\n\n\n- W2. From an experimental perspective, several aspects seem questionable.\n - For the classification tasks, the authors selected a few datasets from the UEA and applied PCA pairs with models that are primarily known as forecasting baselines (except for TimesNet). The reported results, whether with or without PCA, do not represent state-of-the-art performance. It would have been beneficial to include models like InceptionTime or a simple ResNet for comparison. \n - In the forecasting tasks, the authors focused solely on the ETT datasets, which are recognized for their difficulty in forecasting. 
It would be more insightful to conduct similar experiments on datasets such as traffic or electricity, which may provide additional context and validation for the proposed methods.\n\n\n[1] Unsupervised scalable representation learning for multivariate time series, Neurips 2019\n\n[2] Ts2vec: Towards universal representation of time series, AAAI 2022\n\n[3] Vector Quantized Time Series Generation with a Bidirectional Prior Model, AISTATS 2023\n\n[4] Interpretable time series neural representation for classification purposes, IEEE DSAA 2023" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The paper does not seem to demonstrate whether the number of time steps is reduced or to what extent. It only generally mentions at the beginning that applying PCA will compress the time series, but it is unclear whether the compression target is the time steps or the data component features within the sliding window. In the Introduction, the authors state that dimensionality reduction techniques for time series data mainly focus on the variable dimension, and they intend to apply PCA for dimensionality reduction along the time dimension. However, from the overall description, the authors appear to only apply PCA to the time-step data within the sliding window to extract local features. 
This method extracts feature information from each window, but the number of time steps within the window seems to remain unchanged. \n\n2. It is currently unclear whether the authors also applied PCA to the test set in the classification task. If the authors used PCA to preprocess the test set, this would be unreasonable because the test data should be assumed to be unknown beforehand. If the authors did not apply PCA to the test set, maintaining the original data format and attributes while keeping the network unchanged, then theoretically, there should not be a significant acceleration in inference time. \n\n3. There is an issue with unreasonable descriptions in the related work section. The authors discuss the limitations of Xu et al.'s work titled \"Transformer multivariate forecasting: Less is more?\" In their second point, they state: \"Secondly, it is designed for scenarios where a multivariate series forecasts a univariate series, focusing on reducing the variable dimension of covariate series without preprocessing the target variable series, even if the covariate series may have minimal association with the target series.\" \n\n4. In the theoretical analysis section, as shown in Figure 3, the authors only demonstrate the effectiveness of PCA in reducing dimensionality along the feature dimension. However, they do not address dimensionality reduction along the time dimension (i.e., the compression of the number of time steps). \n\n5. It appears that the experimental results presented in Figure 3 are based on a single experiment. The authors did not verify the generalizability of their results by experimenting with different numbers of principal components, k. Without varying k, there's a risk that the chosen value might be a \"lucky number\" that coincidentally yields favorable results. \n\n6. The experimental results may be influenced by the characteristics of the specific dataset used. 
The smoothing effect observed in Figure 3 might only be applicable to the current dataset and may not represent the performance on other time series data. Including experiments on a variety of datasets could improve the credibility of the conclusions. \n\n7.The authors propose that \"Specific trends and periodic patterns in historical series may not be crucial for the learning of TSA models.\" In the field of Time Series Analysis (TSA), traditional viewpoints and a large body of research emphasize the importance of trends and periodic patterns. These elements are critical for understanding the inherent structure of the data and for predicting future values. \n\n8. The authors' statement, \"Therefore, although PCA may alter the trend or periodicity, it introduces new coherent patterns—such as the main directions of variation, denoised low-dimensional representations, and latent features—that benefit TSA model learning without negatively impacting predictive performance,\" is overly absolute. Claiming that there are no negative impacts is too definitive. In practical applications, any data transformation can potentially have both positive and negative effects on model performance; the specific outcome depends on the characteristics of the data and the type of model employed.\n\n9. It seems that the experiments presented in Tables 2, 3, and 4 are based on single runs without any statistical significance testing. There is no indication of whether the results are consistent across multiple trials or if they could be due to random chance. Furthermore, in each experiment, the number of principal components (k) selected for PCA is based on a single value, and this value differs across different datasets.\n\n10. 
In Table 2, concerning the Time Series Classification (TSC) experiments, the authors conclude based on the results: \"These results reveal PCA’s efficacy in extracting series information for TSC tasks without performance loss, enabling faster training/inference.\" However, the results presented in Table 2 indicate that applying PCA on certain datasets and networks can lead to significant performance degradation. For example, on the SelfRegulationSCP1 dataset, the accuracy of the TimesNet network decreased by 23.2% after applying PCA. This substantial drop contradicts the authors' absolute assertion of \"without performance loss.\" Out of the 20 metrics reported, only 10 show performance improvement when PCA is applied, which amounts to just 50%. This proportion raises doubts about the claim made in the abstract that applying PCA to sliding sequence windows can maintain model performance.\n\n11.The authors state in Table 3: \"The results of Linear are adapted from the study (Zeng et al., 2023).\" However, upon reviewing the cited paper by Zeng et al. (2023), I was unable to locate the specific data presented by the authors. This discrepancy raises concerns about the reliability and accuracy of the data used in their experiments." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The idea of applying PCA within sliding windows offers a fresh perspective on dimensionality reduction for time series data. By reducing the dimensionality of the input data, the proposed method can decrease computational load, which is particularly beneficial for deep learning models dealing with large-scale or high-frequency time series data." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose an innovative approach for preprocessing time series data by applying Principal Component Analysis (PCA) to data within sliding windows. This method aims to extract the principal components from the data, effectively reducing dimensionality before feeding it into a deep learning network. Traditionally, it is commonly believed that applying PCA along the time dimension can disrupt temporal dependencies inherent in time series data. Contradicting this notion, the authors suggest that applying PCA to sliding sequence windows can preserve model performance while enhancing computational efficiency. They support their claims with experiments conducted on three primary tasks: time series classification, prediction, and regression." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The study exhibits several weaknesses. Firstly, it lacks clarity on dimensionality reduction along the time dimension, focusing instead on feature dimension reduction without reducing time steps, which contradicts its stated goal. Secondly, the application of PCA to the test data in classification tasks is ambiguous; applying PCA to test data is inappropriate, but without it, the claimed acceleration in inference time lacks justification. Thirdly, the study misrepresents related work by critiquing standard practices designed to prevent information leakage, and its theoretical analysis fails to support the core claim of time dimension reduction. Additionally, the experiments lack statistical validation and parameter exploration, relying on single runs with fixed principal component numbers, raising concerns about generalizability and potential overfitting. The authors make overgeneralized and absolute claims about the benefits of PCA without sufficient evidence, ignoring observed performance degradation in certain datasets. 
Furthermore, limited dataset diversity suggests results may be dataset-specific, and discrepancies in reported data raise doubts about reliability. Lastly, the study challenges established concepts in time series analysis without adequate empirical support, and methodological inconsistencies, such as varying the number of principal components without clear rationale, hinder reproducibility and limit the applicability of the findings." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "What makes PCA particularly effective here? Is there something unique about the space spanned by its vectors?\n\nHow does the dimensionality affect the results? A graph showing MSE versus the number of dimensions (n) would be helpful.\n\nWhen selecting the first n eigenvalues, do you choose the largest, the smallest, or select them randomly?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- It clearly introduces the problem of temporal dimensionality reduction in time series data and provides a solid rationale for using PCA.\n- The experimental setup is thorough, covering various time series tasks (classification, forecasting, and regression) and a range of model types (Linear, Transformer, CNN, RNN), which effectively illustrates the generalizability of the approach.\n- The paper strengthens its argument by presenting concrete metrics, such as GPU memory reduction and speed improvements." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper investigates using Principal Component Analysis (PCA) to reduce the temporal dimensionality of time series data in deep learning models for tasks like classification, forecasting, and regression. Traditionally, PCA has been applied to reduce variable dimensions, but this study applies PCA across time windows, aiming to maintain temporal structure while reducing redundancy and computational costs. Results show that PCA preprocessing can accelerate training and inference by up to 40% in some models, like Informer, and reduce memory usage by 30% in models like TimesNet without compromising accuracy. PCA also proves effective in noise reduction, retaining essential statistical characteristics and supporting efficient learning across different deep learning models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The intuition behind why PCA is specifically suitable for time series dimensionality reduction is not clearly explained. 
Many studies have shown that using orthogonal bases (e.g., FFT, wavelets, Legendre polynomials) can improve performance and reduce dimensionality, yet the paper does not address how PCA differs or why these methods were not included in comparisons.\n- Adding comparisons with modern compression techniques, beyond linear methods and downsampling, could make the evaluation more robust.\n- Some sections, particularly on the theoretical underpinnings of PCA’s use for time series, could benefit from clearer explanations to aid reader comprehension.\n- Each table could benefit from explanations of the metrics used, clarifying what constitutes a “good” or “bad” result (e.g., lower MSE is better), which would help readers interpret the results more easily.\n- More detailed visualizations, such as diagrams showing PCA’s effects on time series structure and feature retention, could enhance clarity." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This study employs PCA for preprocessing time series before inputting it into deep-learning models to enhance model efficiency." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024revisiting,\ntitle={Revisiting {PCA} for Time Series Reduction in Temporal Dimension},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0CvJYiOo2b},\nnote={under review}\n}" }, "abstract": { "value": "Deep learning has significantly advanced time series analysis (TSA), enabling the extraction of complex patterns for tasks like classification, forecasting, and regression. While dimensionality reduction has traditionally focused on the variable space—achieving notable success in minimizing data redundancy and computational complexity—less attention has been paid to reducing the temporal dimension. 
In this study, we revisit Principal Component Analysis (PCA), a classical dimensionality reduction technique, to explore its utility in temporal dimension reduction for time series data. It is generally thought that applying PCA to the temporal dimension would disrupt temporal dependencies, leading to limited exploration in this area. However, our theoretical analysis and extensive experiments demonstrate that applying PCA to sliding series windows not only maintains model performance but also enhances computational efficiency. In auto-regressive forecasting, the temporal structure is partially preserved through windowing, and PCA is applied within these windows to denoise the time series while retaining their statistical information. By preprocessing time series data with PCA, we reduce the temporal dimensionality before feeding it into TSA models such as Linear, Transformer, CNN, and RNN architectures. This approach accelerates training and inference and reduces resource consumption. Notably, PCA improves Informer training and inference speed by up to 40% and decreases GPU memory usage of TimesNet by 30%, without sacrificing model accuracy. Comparative analysis against other reduction methods further highlights the effectiveness of PCA in enhancing the efficiency of TSA models. Code is provided in the supplementary materials." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "principal component analysis (PCA)", "time series classification", "time series forecasting", "time series extrinsic regression" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/cae3a553d272ea71124861659e8713307733da09.pdf" }, "presentation": null, "primary_area": { "value": "learning on time series and dynamical systems" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/c4482cce77197e1b62a6e550dbab1ad5f101fd2a.zip" }, "title": { "value": "Revisiting PCA for Time Series Reduction in Temporal Dimension" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0DZEs8NpUH
Personality Alignment of Large Language Models
main
Active
Personality Alignment;Large language models;behavioral preferences of LM
generative models
5;5;6
4;4;5
3;3;4
2;2;3
3;3;3
5.333333
4.333333
3.333333
2.333333
3
1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Some additional questions remian:\n\nCan the authors provide more details on the human evaluation process, such as annotator screening, training, and inter-annotator agreement metrics? This would help validate the human evaluation results. The paper is light on this.\nHow do you expect the methods to perform on multilingual models and non-English datasets? \nThe discussion on negative societal impacts of AI hyper-personalization (e.g. filter bubbles, opinion polarization) and is light. Authors should exand on this more. \nFinally, exploring beyond multiple choice for collecting preference data, such as open-ended text can be interesting and more useful." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "A key strength of the work is the PAPI dataset, with over 300,000 real-world subjects providing detailed responses to the IPIP-NEO-120 and IPIP-NEO-300 questionnaires. The scale is impressive.\n\nThe Personality Activation Search (PAS) method is interesting. By identifying key activation vectors that correspond to personality traits and optimally shifting activations in those directions, this approach can more effectively do personality alignment during inference. 
\n\n The authors also conduct a comptehensive evaluation of PAS, comparing it against prompting-based (Few-Shot, P2) and RL-based (PPO, DPO) baselines on the PAPI dataset." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes the concept of Personality Alignment for tailoring LLMs to match the preferences and behaviors of individual users or groups. The authors created a large-scale dataset called PAPI with data on behavioral preferences from over 300,000 real subjects across the Big Five personality dimensions. They also propose the Personality Activation Search (PAS) method for efficiently aligning LLMs with individual preferences during inference. PAS identifies key activation vectors corresponding to personality traits and optimally shifts activations in those directions. The authors show that PAS achieves strong performance in capturing individual preferences with high compute efficiency compared to baseline methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the PAPI dataset is impressively large, it doesnt seem to be diverse. Around 60% of subjects are female and the average age is 25 years. This skew can potentially bias the results and limit generalizability to other populations. \nAlso, PAPI dataset relies on self-report data from personality questionnaires. While this is a standard approach in personality research, self-reports can be subject to biases such as social desirability and lack of self-insight. Incorporating additional data sources, such as behavioral measures or peer ratings, could be more useful.\n\nThe evaluation of PAS focuses primarily on high-level alignment with the Big Five traits. However, personality is a complex, multifaceted construct, and individuals can vary in their expression of specific facets within each trait. \n\nThe PAPI dataset uses a multiple-choice format for collecting personality data. 
While this allows for structured and efficient data collection, it may limit the richness and naturalness of the responses. \nThe paper also compares PAS to prompting and RL baselines but does not include a comparison to fine-tuning the entire language model. This is an important consideration as well.S" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- In any of your experiments, did any of your tuning or evaluation methods trigger safety wall? For example, the model might refuse to answer some questionnaire questions related to consciousness or self-awareness." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- Overall, I like this paper and I think it's a very good attempt in aligning LLMs from a personality perspective. \n\n- The paper is generally well-written and easy to read, illustrations are also made in a good quality.\n\n- It's cool to see how personality affects downstream reasoning tasks. It has always been something missing in prior personality-related LLM work. And it's definite a god step here.\n\n- The proposed method is efficient, and can provide better results compared to prompting-based methods. 
It may have wide applications in tailoring persona/personality-specific chatbots to end users." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces the concept of Personality Alignment for LLMs, that is, tailoring of responses to individual user preferences based on personality traits. Using the Big Five personality theory, the authors introduces the PAPI dataset modeling human personality distributions. The authors also propose a LLM tuning method based on disabling certain activation heads -- Personality Activation Search (PAS). Evaluation results demonstrate PAS’s performance and efficiency compared to traditional alignment techniques including RL- and prompting-based methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Presentation: The authors claim that they collected 307k human samples to craft the PAPI dataset and the use of the IPIP-NEO-120/IPIP-NEO-300 for evaluations as part of their contributions. However, the IPIP-NEO series inventories were frequently used in prior works (e.g., Jiang et al., 2024 as mentioned in the paper), and the 307k responses are also publicly available.\n\n- The implications of the 307k PAPI dataset are not that clear. For example, in the experiments authors performed, it seems only the overall average personality tendencies and specific participants' responses are used. So what's the actual advantages of using a such large dataset?\n\n- The proposed tuning method, although efficient, but requires access to model weights. I doubt its ability to generalize such methods to black-box methods (e.g., GPTs) in the future." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Could the authors explain why the PAS method is so effective by looking at the internal hidden layer representations of the model? Intuitively, direct training approaches like DPO/PPO might yield more noticeable improvements, but the results here contradict my expectations.\n\nIn line 429, the authors mention \"Why Did Scaling Laws Fail?\" but don't seem to fully answer this question. I would like the authors to explain this from the perspective of the domain's specificity. Is it because larger models learn more general alignment, which makes it harder for them to excel at aligning with a specific personality?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The strengths of this paper can be summarized in three key points:\n\n- It contributes a new dataset (or more accurately, a dataset generation pipeline).\n- The proposed method is simple yet highly effective.\n- The experimental analysis is comprehensive, with particularly detailed descriptions of the experimental setup." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores the personality alignment of large language models (LLMs). 
Specifically, it introduces a new dataset and proposes the PAS method for personality alignment based on this dataset." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "However, I believe the paper has the following weaknesses:\n\n- The scope of the contribution is somewhat limited. I would have liked to see this method applied to a broader range of personalities, rather than being restricted to just the five personalities of the Big Five model. The authors could consider additional datasets, such as the Dark Triad or even the MBTI test (though MBTI remains controversial in psychology). Expanding in this way would enhance the paper’s overall contribution.\n\n- While the proposed method performs well on the dataset, it is actually quite similar to approaches used in many previous studies [1][2]. As a result, the novelty of the method is somewhat lacking, and the authors should be careful not to overstate their contribution.\n\n- Essentially, the proposed method is a form of personalized alignment, so the authors should compare it against more baselines [3].\n\n- Figures 6 and 7 show a significant disparity between LLM-as-a-judge evaluations and human evaluations, which makes me question the consistency and reliability of the judgment process.\n\n- Other details: In line 1304, the authors mention that human annotators come from machine learning and computer science, but ML is a part of CS. Additionally, could the authors disclose the educational background of the annotators (undergraduate or graduate)?\n\n[1] Zheng, Chujie, et al. \"On prompt-driven safeguarding for large language models.\" *Forty-first International Conference on Machine Learning*. 2024.\n\n[2] Wang, Haoran, and Kai Shu. 
\"Backdoor activation attack: Attack large language models using activation steering for safety-alignment.\" *arXiv preprint arXiv:2311.09433* (2023).\n\n[3] https://github.com/liyongqi2002/Awesome-Personalized-Alignment" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce Personality Alignment for language models, efficiently tailoring responses to individual user preferences, providing the‘ PAPI dataset with over 300K subjects and a practical, efficient alignment method." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024personality,\ntitle={Personality Alignment of Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0DZEs8NpUH},\nnote={under review}\n}" }, "abstract": { "value": "Current methods for aligning large language models (LLMs) typically aim to reflect general human values and behaviors, but they often fail to capture the unique characteristics and preferences of individual users. To address this gap, we introduce the concept of Personality Alignment. This approach tailors LLMs' responses and decisions to match the specific preferences of individual users or closely related groups. Inspired by psychometrics, we created the Personality Alignment with Personality Inventories (PAPI) dataset, which includes data from 300,000 real subjects, each providing behavioral preferences based on the Big Five Personality Factors. This dataset allows us to quantitatively evaluate the extent to which LLMs can align with each subject's behavioral patterns. Recognizing the challenges of personality alignments—such as limited personal data, diverse preferences, and scalability requirements—we developed an activation intervention optimization method. This method enhances LLMs' ability to efficiently align with individual behavioral preferences using minimal data and computational resources. 
Remarkably, our method, PAS, achieves superior performance while requiring only 1/5 of the optimization time compared to DPO, offering practical value for personality alignment. Our work paves the way for future AI systems to make decisions and reason in truly personality ways, enhancing the relevance and meaning of AI interactions for each user and advancing human-centered artificial intelligence." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Personality Alignment", "Large language models", "behavioral preferences of LM" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/f8f5075ac22c1f67f04cfb181360c9e65f0b6466.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/2591f5eeb5b79187828d6d7400ee27aecf8e8762.zip" }, "title": { "value": "Personality Alignment of Large Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0EP01yhDlg
Faster Language Models with Better Multi-Token Prediction Using Tensor Decomposition
main
Active
Large language model;Self-speculative decoding;Multi-token prediction;Low-rank approximation
foundation or frontier models, including LLMs
3;5;5;5
4;4;3;4
2;2;3;3
2;3;2;3
2;3;3;3
4.5
3.75
2.5
2.5
2.75
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please address the issues above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "-- The paper studies an interesting problem to speed updecoding by predicting multiple tokens in parallel at higher acceptance rates than typical speculative sampling approaches.\n\n-- The proposed solution seems straightforward to implement.\n\n-- The contribution to identifying issues with existing multi-token training approaches and proposing a higher rank alternative is novel." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper borrows key idea from Gloecke et al. [1] to train multi-token predictors instead of single next word predictor. This work identifies a key flaw in [1] which is that the distributions for multiple $n$ future tokens are independent of each other thus ignoring the token interdependency. This work interprets this as a rank-1 approximation to the full distribution tensor of $n$ next tokens and proposes to improve this to a higher rank estimate. This higher rank estimate is achieved by $r$ multiple heads defining $r$ different distributions and using their mixture for the $n$-future token prediction. 
Training and inference method for this is discussed followed by an observation that the multi-token predictor can be used in self-speculative sampling approach where the next word prediction is made faster by using proposal distribution that predicts multiple next tokens. The experiments are mainly performed on nano-GPT model architecture trained on TinyStories dataset and also finetuning the PyCodeGPT model." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "-- The evaluation leaves a lot to be desired. Experiments are done on small datasets and small models but more concerningly, little else is provided aside from loss curves of training runs and token acceptance rates for the scheduled sampling approach. As an example, performance of these models on various benchmarks to estimate the quality of these trained models would aid in better assessment of the approach. Also, it is unclear if this approach empirically scaled to larger datasets and models effectively in terms of speed and performance.\n\n-- Comparison to other speculative sampling approaches with various draft models will give abetter idea about the improvement on speed and resources with the proposed approach.\n\n-- There is room for improvement in presentation. Figure 1 doesn't help with understanding the paper better and is confusing. Algorithm 1 can also be described more clearly. Currently, it hinges on the reader's prior understanding of speculative decoding." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "-How does the method combine with beam search?\n\n-Does the speedup increase or decrease as a function of model size?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "-Tackles an interesting and important problem\n\n-The method is written clearly. \n\n-I also find the connections to tensor decomposition interesting." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper studies multi token prediction in transformer language models. Vanilla autoregressive degressing is expensive for long outputs since it only decodes one output at a time.\n\nThe authors are inspired by the work of Gloeckle et al. 2024. In Gloeckle et al. 2024, given a context x_{t:1} the next n tokens are predicted independently (with multiple heads). As the authors point out, this amounts to a rank-1 tensor approximation of the joint probability distribution.\n\nIn this work, the authors explore higher ranks (r > 1) using CP decomposition. They draw a connection to mixture-of-experts and propose a auxiliary load balancing strategy so all the weight is not on one expert (component). \n\nThey then perform experiments validating their work." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Some confusion on experimental results: I'm a bit confused as to how much of a speed up the author's approach gives over both the approach of Goeckle et al. 2024 (i.e. rank=1) and also vanilla non-autoregressive decoding for the same level of quality. 
\n\nFor example in Table 1: I see that in Table 1 the final column (time per token) is not much different across all the rows?\n\nMoreover I don't quite understand Table 3.\n\nComparisons: I think the authors need additional baselines in addition to just ablations of their own approach from the related work.\n\nRelated work: The authors should also cite and discuss related work in non-autoregressive decoding (typically for neural machine translation) that has been developed for a while e.g. see below and citations therein.\n\nhttps://arxiv.org/abs/1711.02281\nhttps://arxiv.org/abs/2204.09269\nhttps://arxiv.org/abs/2012.15833" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See above." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "(1) The method is well-motivated and explained clearly. The connection to MoE, which motivates a load-balancing auxiliary loss, is also interesting.\n\n(2) The paper seeks to improve inference speed in large models, which is an important problem." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "One existing form of speculative decoding involves predicting k tokens at a time independently, which can be thought of as a rank-1 decomposition of the k-order joint probability tensor over those tokens. This paper instead proposes to predict the factors for a rank-r decomposition. They evaluate two instantiations of this idea: training a LM from scratch to predict this decomposition, and taking an existing LM and fine-tuning additional heads to predict this decomposition. Their experiments show that higher rank decompositions lead to higher acceptance rates in speculative decoding." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the method seems interesting and promising, the paper's experiments seem disorganized and insufficient to fully demonstrate the effectiveness of the method.\n\n(1) The majority of the results are for a 56.3M parameter trained on TinyStories, which is a very limited evaluation setting, both because the dataset is synthetic and because the setting involves retraining. There are also some experiments on head-only tuning for PyCodeGPT in Table 3, but the results in that setting are not very strong --- increasing the rank does not actually seem to actually improve inference speed for many of the models. The paper would benefit from more thorough evaluation and stronger results (especially on non-synthetic datasets, and on speeding up existing models rather than requiring retraining: for example, the evaluations done in https://arxiv.org/pdf/2211.17192 (Table 3) would improve this paper).\n\n(2) The majority of the experiments section seems to involve analysis rather than results: only tables 1 and 3 report inference times, which are the main results. I would suggest moving other plots (token acceptance rate, first token vs joint loss, etc.) 
to a separate analysis section.\n\n(3) There are a substantial number of issues with the experiment design that would be beneficial to address: (a) In Figure 3, it seems like hyperparameters are being selected using the test set; I would suggest using a dev set instead. (b) To make comparisons fair, I would suggest training each rank for the same amount of wall-clock time, rather than number of steps, in case higher ranks require more time per forward pass. (c) The self-speculative setup makes the results hard to interpret because each rank uses a different target model. I would suggest that each method be speculative with respect to the same target model. (d) The paper would be clearer if the experiments were described concretely: for example, the paper states that \"Our measurements were made with seq length varying from 1024\nto 4096\" (lines 408-409), but it's not clear which experiments use which sequence lengths." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.\tIn line 113, the authors denote the input sequence as x_{t:1} and the corresponding embeddings as e_{t:1}. According to the description, the embeddings are the representations of the final transformer layer, while in Figure 1, the same value is denoted as z_t. Do z_t and e_t means the same representation, or e_t means the “input” embeddings? 
This notation is somewhat confusing.\n\n2.\tAre there any results on the acceptance rate for Llama 8B, not just inference time?\n\n__Typos__:\n\n1.\tIn line 116, a comma is missing before \"the conditional probabilities ...\".\n2.\tIn line 150, \"Note, that\" should be revised to \"Note that\"." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tThis work identifies the limitations of recent multi-token prediction standards and proposes a more generalized approach.\n\n2.\tThe experimental results demonstrate the method's effectiveness, and the ablation study underscores the importance of the introduced components." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper focuses on speculative decoding methods that incorporate additional prediction heads. The authors conceptualize current standard approaches as rank-1 canonical tensor decomposition and propose a generalized method that extends from rank-1 to rank-r canonical tensor decomposition to approximate the joint distribution of future tokens. To enhance model training, an auxiliary loss is introduced to address weight imbalances. Experimental results highlight several key findings:\n\n1.\tIncreasing the ranks results in a decrease in joint loss.\n\n2.\tThe first token appears to have no correlation with different ranks.\n\n3.\tThe method is effective even when only the prediction heads are trained.\n\nThe proposed approach achieves notable speedups compared to autoregressive models and rank-1 baselines." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThe work lacks comparison with existing state-of-the-art methods such as Medusa, Eagle, etc., which belong to the same research domain.\n\n2.\tIn the code generation setting, the performance of averaging two accepted draft tokens is not promising.\n\n3.\tThere are several typos in this version that need revision." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "New model for multi-token prediction in transformers based on canonical probability decomposition that improves the sampling efficiency in the self-speculative decoding paradigm without compromising accuracy." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024faster,\ntitle={Faster Language Models with Better Multi-Token Prediction Using Tensor Decomposition},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0EP01yhDlg},\nnote={under review}\n}" }, "abstract": { "value": "We propose a new model for multi-token prediction in transformers, aiming to enhance sampling efficiency without compromising accuracy. Motivated by recent work that predicts the probabilities of subsequent tokens using multiple heads, we connect this approach to rank-1 canonical tensor decomposition. By generalizing it to a rank-r canonical probability decomposition, we develop an improved model that predicts multiple tokens simultaneously. This model can also be interpreted as a mixture of experts, allowing us to leverage successful techniques from that domain for efficient and robust training. Importantly, the overall overhead for training and sampling remains low. Our method demonstrates significant improvements in inference speed for both text and code generation tasks, proving particularly beneficial within the self-speculative decoding paradigm. 
It maintains its effectiveness across various model sizes and training epochs, highlighting its robustness and scalability." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large language model", "Self-speculative decoding", "Multi-token prediction", "Low-rank approximation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6fe4ded46a26ce5b63b0b8d91cd7d350f5112c33.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/8f4f0ebbb86d7ba518a01a0592a741618d66b2dd.zip" }, "title": { "value": "Faster Language Models with Better Multi-Token Prediction Using Tensor Decomposition" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0F1rIKppTf
Through the Looking Glass: Mirror Schrödinger Bridges
main
Active
entropic optimal transport;schrödinger bridge;stochastic differential equations;sampling
probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.)
5;5;6;6
3;3;4;3
3;4;2;3
2;3;2;3
3;4;2;3
5.5
3.25
3
2.5
3
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "What's the connection and difference between the MSB method and the score-matching strategy (like Song et al. 2021)? What's the performance difference?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "By using time-symmetry, MSB only requires a single neural network and half of the computational expense compared to other IPFP-based algorithms." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes mirror Schrodinger bridge (MSB), a model for conditional resampling. An alternating minimization procedure is used to solve for the MSB with a theoretical guarantee. On the empirical side, the MSB method is implemented to sample from both toy distributions and image distribution from the real world." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The theoretical results are limited to asymptotic analysis. The convergence rate is not presented;\n2. In the empirical evaluation, comparison to baseline methods is limited to the Gaussian example in Section 5.1. As a result, it's not clear how MSB compares to other methods in real-world image generation." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. **Error Analysis for AMP**: The paper introduces the Alternating Minimization Procedure (AMP) in Equations (4) and (5) but lacks an error analysis. Could you elaborate on the convergence speed of AMP? Specifically, are there theoretical bounds or guarantees on the convergence rate that would enhance the theoretical foundation of your method?\n\n2. **Benchmarking Against Existing Methods**: Could you clarify which algorithms you compared with your proposed method for each example case in Section 5? Benchmarking against established methods would help situate your approach within existing literature.\n\n3. **Choice of Euler-Maruyama**: Your algorithm employs Euler-Maruyama discretization. How does this choice impact the accuracy and efficiency of solving the Schrödinger bridge problem? Have you considered alternative discretization schemes, and how do they compare in terms of performance?\n\n4. **Iterations vs. Runtime**: How does halving the number of iterations quantitatively affect running time? A breakdown of runtime reductions relative to iteration count would provide a clearer picture of the efficiency gains.\n\n5. **Quantitative Metrics and Proximity Definition**:\n - **Metrics for Resampling Quality**: The paper lacks specific metrics to assess resampling quality in each example case. 
What metrics do you use to evaluate how well the method preserves the integrity of the original distribution during resampling?\n - **Proximity Definition**: An empirical definition of proximity would clarify how closely generated samples align with input data. For example, on MNIST, how would a proximity value of a digit-5 image with \\(\\sigma=1\\) compare to that of a digit-3 image with a different \\(\\sigma\\)?\n\n6. **Experimental Setup Details**: Although the paper claims empirical validation across various domains, the experimental setup lacks specificity. Could you provide more details on the datasets, experimental procedures, and metrics used to assess performance?\n\n7. **Quantitative Results and Comparisons**: Could you include performance metrics and comparisons that demonstrate the robustness and potential advantages of mirror Schrödinger bridges over existing techniques?\n\n8. **Scalability to High-Dimensional Data**:\n - **Performance on High-Dimensional Benchmarks**: The scalability of your method to high-dimensional data is not discussed. Could you provide insights into its performance and computational complexity on high-dimensional or large-scale datasets?\n - **Strategies for High-Dimensional Data**: What strategies do you propose to ensure the scalability and efficiency of mirror Schrödinger bridges when applied to high-dimensional data or data on manifolds? Are there modifications or optimizations that could improve performance in these scenarios?\n\n9. **Interpretation of Figure 2**: In Figure 2, the resampling estimate with $\\sigma=1$ appears to produce more concentrated samples compared to the original samples. Could you explain this behavior? Is there some form of geometric or manifold enhancement occurring in the resampling process?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper introduces the mirror Schrödinger bridge framework, an approach that differs from traditional Schrödinger bridges by focusing on mapping a distribution onto itself rather than between distinct distributions. This self-mapping approach directly addresses the challenge of conditional resampling within the same distribution, opening up new possibilities for generating in-distribution variations of data points. By incorporating time symmetry and the Alternating Minimization Procedure (AMP) to establish theoretical foundations, the paper presents an innovative solution to resampling. The algorithm also leverages time symmetry to train a single neural network for modeling the diffusion process drift, enhancing computational efficiency.\n\nThe paper includes solid theoretical foundations and provides comprehensive convergence proofs in the total variation metric, even in infinite-dimensional state spaces. The AMP is carefully developed and shown to converge to the mirror Schrödinger bridge, ensuring methodological consistency. The algorithm’s implementation is efficient, theoretically reducing computational overhead by half compared to Iterative Proportional Fitting Procedure (IPFP)-based methods. Empirical evaluations across several applications support some theoretical claims, demonstrating the method’s capability to generate high-quality proximal samples for tasks like data augmentation and generative modeling. The organized presentation of theoretical and empirical findings underscores the paper’s contribution to the field.\n\nAdditionally, the paper is well-written and structured, guiding the reader through complex concepts with clarity. 
The transition from theoretical foundations to practical algorithmic details is seamless, ensuring a coherent flow. Most definitions and problem formulations are clearly presented. Explanations of AMP and iterative schemes enhance understanding, while algorithmic pseudocode supports practical comprehension." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces the mirror Schrödinger bridge, a method for addressing the resampling problem when the initial and target distributions are identical. Unlike traditional Schrödinger bridges, which are designed for mapping between two distinct distributions, the mirror Schrödinger bridge is formulated specifically for self-mapping within a single distribution. This unique approach facilitates the generation of in-distribution variations of data points, allowing for conditional resampling that maintains the original distribution’s integrity.\n\nThe authors develop a theoretical foundation for this method, employing time symmetry and the Alternating Minimization Procedure to establish convergence in the total variation metric, even for infinite-dimensional state spaces. This achievement addresses the challenging issue of convergence in high-dimensional settings. Additionally, the algorithm capitalizes on the time symmetry inherent in the problem, enabling it to model the diffusion drift with a single neural network. This innovation significantly reduces computational costs, effectively halving the effort compared to Iterative Proportional Fitting Procedure based approaches.\n\nEmpirical evaluations underscore the practical value of the mirror Schrödinger bridge across diverse applications, highlighting its capability to produce high-quality proximal samples that are valuable for tasks like data augmentation and generative modeling. 
In summary, this research claims to provide a theoretically rigorous and computationally efficient solution for conditional resampling within the same distribution, combining solid theoretical contributions with practical algorithmic advancements." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the paper introduces mirror Schrödinger bridges, the framework retains substantial mathematical similarities to traditional Schrödinger bridges. Could you clarify the key mathematical distinctions between the two approaches? The theoretical analysis would benefit from an error analysis for the Alternating Minimization Procedure (AMP) outlined in equations (4) and (5), which would provide valuable insights into the convergence rate of the proposed approach. The absence of such an analysis limits our understanding of the efficiency and accuracy of the AMP. \n\n\n\nOn the practical side, the algorithm does not include comparisons with other established approaches in the literature, nor is there any discussion regarding the impact or choice of specific discretization methods, such as the Euler-Maruyama scheme, in addressing this problem. Furthermore, the paper lacks a detailed explanation of all the Figures, which hinders the reader's ability to assess how the proposed method performs relative to existing methods and to understand the extent to which halving the iterations reduces runtime quantitatively. \n\nThe paper also lacks a quantitative flow for evaluating the equality of resampling across examples and fails to specify metrics used to assess performance. An empirical definition of \"proximity\" would add clarity. For instance, it is unclear whether a proximity value of 5 remains constant with $\\sigma=1$ or changes to 3 with a different $\\sigma$, and the way proximity is quantified in empirical examples remains vague. 
Although the paper claims validation across various application domains and provides some information on the experimental setup and datasets, it lacks sufficient detail on the specific metrics used for performance assessment, making it difficult to evaluate the method's effectiveness and robustness relative to existing techniques. \n\nFinally, while the paper emphasizes algorithmic simplifications that reduce computational costs by training only one neural network, it does not address the scalability of the method to high-dimensional data. The absence of any analysis on how the method performs as the data dimensionality increases leaves questions about its applicability to high-dimensional settings, which are increasingly relevant in practical applications." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Comments:\n- Line 053: maybe write what \"\\delta\"-measure means (or just say Dirac measure).\n- The paragraph just above figure 3 is very unclear. I'm not sure what the rows and columns are meant to refer to here..\n- The ability to use one neural network is not surprising from the connection between EOT and the SB problem. Equation (24-25) in the work by Feydy et al. 
(2019) precisely proposes some kind of fixed-point equation on one potential function (whereas for entropic OT between two measures, there are typically two potentials to optimize over via the Sinkhorn algorithm)\n- Question: Is there any hope to provide a *rule* for choosing the amount of noise added in the process of generating images?\n- Question: The choice of OU process appears entirely arbitrary. Why not consider standard Brownian motion as the reference process?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper proposes to alleviate some computation burden of training SBs by proposing a learning algorithm that learns the SB between a measure and itself. Still faithful to the generative modeling paradigm, they learn the SB from a data distribution to itself, with the goal of starting at existing samples and generating diverse ones. The writing is relatively clear, and Figure 2 is especially clear at describing the phenomenon of \"starting from an existing sample\" and, given enough noise, learns to go somewhere else in the distribution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work studies the Schrödinger bridge (SB) between a distribution and itself, unlike most of the literature which focuses on the SB between two distributions (e.g., the standard Gaussian, and the data distribution). The authors propose this model as a means for conditional resampling of a distribution, where the \"noise\" induced by the bridge process allows them to obtain new samples which are in-distribution. They demonstrate their approach on many experiments, and have some proofs of their technical results." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the computational burden of having to train two neural networks instead of one is appealing, there is little comparison between MSB and DSB or DSBM in terms of quantifying sample versatility (they performed some comparisons in the Gaussian case, but these are far from conclusive). For instance, in Figure 3: is there a certain $\\sigma_\\star$ after which the data generated by the MSB changes classes? This is likely hard to prove theoretically, but knowing if there exists some threshold after which data stops being the same class would be very interesting." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "When training, how are the data split? Is there any pairing done between the data points? How would that affect the mapping between the two sets. \n\nAre there other mean reverting processes that you can consider other than OU processes? How do these affect the performance of the method?\n\nIs there a set of results one can consider that consist of analyzing the regularity of the path measures?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The authors provide an interesting perspective on the Schrödinger bridge problem and provide a new technique for fitting a path measure connecting an initial condition given by itself to itself.\n\nThe method is fairly straightforward to implement and say to analyze.\n\nThe method provides optimality with respect to relative entropy, which is a nice property of the sample paths." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors consider a modification to the original Schrödinger bridge problem where they consider the marginals to be the empirical measure of the data. This learns a coupling between two sets of the same data. The coupling is designed to optimal in the relative entropy sense. They modify the iterative proportional fitting procedure such that they project in the direction that minimizes the KL divergence in one step and then in the reverse KL divergence in the next step due to the analytical feasibility of the step. They propose a practical algorithm for computing the projections using a change of measure technique and optimizing the drifts." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main concerns come with the empirical evaluation of the method. \n\nWhile the method has nice motivation, empirically the results do not seem to be impressive. In the cases of low \\sigma, the variation is very small compared to the initial condition. This is likely because the method is estimating a map between itself that is effectively an OU process. Furthermore, when the $\\sigma$ is large, the methods appear to be much more corrupt and lose some of the important features. 
\n\nIn general the performance of the method does not seem to be well studied. Since one of the motivations the authors mentioned was based on the path measure being optimal with respect to relative entropy, I would show some of these results on the regularity of the path space." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Conditional resampling by solving the Schrödinger bridge problem between a distribution and itself" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024through,\ntitle={Through the Looking Glass: Mirror Schr\\\"odinger Bridges},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0F1rIKppTf},\nnote={under review}\n}" }, "abstract": { "value": "Resampling from a target measure whose density is unknown is a fundamental problem in mathematical statistics and machine learning. A setting that dominates the machine learning literature consists of learning a map from an easy-to-sample prior, such as the Gaussian distribution, to a target measure. Under this model, samples from the prior are pushed forward to generate a new sample on the target measure, which is often difficult to sample from directly. In this paper, we propose a new model for conditional resampling called mirror Schrödinger bridges. Our key observation is that solving the Schrödinger bridge problem between a distribution and itself provides a natural way to produce new samples from conditional distributions, giving in-distribution variations of an input data point. We show how to efficiently solve this largely overlooked version of the Schrödinger bridge problem. We prove that our proposed method leads to significant algorithmic simplifications over existing alternatives, in addition to providing control over conditioning. Empirically, we demonstrate how these benefits can be leveraged to produce proximal samples in a number of application domains." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "entropic optimal transport", "schrödinger bridge", "stochastic differential equations", "sampling" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6283ff95f5da61abd862bf58ddf31ffd31ab2236.pdf" }, "presentation": null, "primary_area": { "value": "probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Through the Looking Glass: Mirror Schrödinger Bridges" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0FK6tzqV76
RTDiff: Reverse Trajectory Synthesis via Diffusion for Offline Reinforcement Learning
main
Active
Reinforcement Learning;Diffusion Model;Reverse Synthesize
reinforcement learning
5;5;5;5
3;4;4;4
2;3;3;2
2;3;2;2
2;3;3;3
5
3.75
2.5
2.25
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Are the OOD detector and noise management incorporated with other data augmentation baselines, e.g., SynthER and ATraDiff?\n\n2.In Section 6, a very specific environment is adopted to show the advantages of reverse synthesis over normal synthesis, what is the reason/possible explanation that normal synthesis with OOD detector even produces significantly more In2Out than normal synthesis without OOD detector? (18.2%11.2)\n\n3. For other questions, please questions raised in Weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The ideas in this paper are interesting and novel, where to my knowledge, this is the first work that utilizes the concept of generating the reverse trajectories to address the distribution shift issue. The paper is clearly written and well-motivated. The effectiveness of the proposed method is verified in various environments, and ablation studies are also conducted to validate the effectiveness of different components of the proposed method." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "To address the distribution shift issue in offline reinforcement learning, this paper proposes a novel diffusion-based data augmentation technique, namely RTDiff, where rather than generating forward trajectories, the reverse one is synthesized, which is also the paper's main contribution. Furthermore, the performance of RTDiff is enhanced with the introduction of trajectory length control and noise management. Experimental results show the effectiveness of the proposed approach." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main concern of the paper is whether the reverse synthesis can actually address the issue of distribution shift. As it is not state clearly whether the OOD detector is incorporated with other data augmentation baselines, e.g., SynthER and ATraDiff, it is not very certain whether the better performance achievement of RTDiff is due to the reverse synthesis or the using of the OOD detector. In Section 6, an analysis is conduct by using a simple illustrative environment to show why reverse synthesis avoids issues present in normal synthesis. However, on the one hand, it is better to directly use an environment used in the experiments for the analysis, on the other hand, it is confusing why the reverse synthesis generates trajectories that move from the dangerous area to the upper or lower areas while normal synthesis generates trajectories that start from the lower area and enter the middle dangerous area. Do these two approaches both start from a state in the offline data, and then generate the trajectories in different ways? If the OOD detector is used in the normal synthesis, can the dangerous areas also be avoided? Moreover, the theoretical contribution of the proposed is not very significant. \nIf the concerns can be addressed, I would like to raise my score." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Can the authors discuss in detail the differences between this paper and previous similar works (like ROMI)?\n2. This method is built on IQL, TD3BC, and CQL. Have there been any adjustments to the hyperparameters of these methods after using data augmentation?\n3. Is it possible to conduct a quantitative evaluation of the quality of the generated trajectories? For example, assessing the model error of the generated trajectories, etc." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The article is well-written, with clear expression and logic, effectively reflecting the main argument.\n2. The experiments are very comprehensive. The authors validate the effectiveness of their method across a series of tasks, including both proprioceptive observations and visual observations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes RTDiff, a novel diffusion-based data augmentation technique that synthesizes trajectories in a reverse direction. Such reverse generation naturally mitigates the risk of overestimation by ensuring that the agent avoids planning through unknown states. 
RTDiff also introduces some other tricks including flexible trajectory control and noise management to improve sythesis quality. Emprirical results show the advantage of reverse generation over forward generation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. I believe the authors miss a key related work (called ROMI [1]), which is the first to propose using reverse trajectory generation in the field of offline reinforcement learning. The motivation described for reverse trajectory generation in these two work is also very similar. Therefore, considering that this paper is neither the first to propose the use of reverse trajectory generation in offline RL nor the first to use diffusion for data augmentation, I would say the novelty of this paper is quite limited.\n2. I think this paper lacks comparisons with model-based offline RL methods in the experimental section. According to my understanding, using a diffusion model for data augmentation essentially falls under the same category as previous model-based offline RL methods. For instance, MOPO [2] essentially generates a batch of synthetic samples to supplement the original samples. Therefore, the authors should compare some model-based offline RL methods, such as MOPO, RAMBO [3], etc.\n\n[1] Wang et al. \"Offline Reinforcement Learning with Reverse Model-based Imagination\" (NeurIPS'21)\n[2] Yu et al. \"MOPO: Model-based Offline Policy Optimization\" (NeurIPS'20)\n[3] Rigter et al. 
\"RAMBO-RL: Robust Adversarial Model-Based Offline Reinforcement Learning\" (NeurIPS'22)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper is well-written and easy to follow.\n2. RTDiff introduces reverse trajectory generation, an OOD detector, and a noisy control method to achieve efficient and reliable sample generation. These approaches are intuitively sound.\n3. The authors conduct extensive benchmark and ablation experiments. Experimental results demonstrate that RTDiff outperforms previous baselines on both vector-based and pixel-based tasks, and each component—reverse trajectory generation, the OOD detector, and noisy control—exhibits its effectiveness." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a novel diffusion-based data augmentation method, RTDiff, for offline reinforcement learning. First, RTDiff mitigates the data distribution shift issue present in previous data augmentation methods by generating reverse trajectories instead of forward ones. 
Second, RTDiff trains an out-of-distribution (OOD) detector to truncate the OOD segments of generated trajectories, further enhancing sample authenticity. Finally, the authors propose a new noisy control method to improve sample generation efficiency. Experimental results validate the effectiveness and efficiency of RTDiff and different components across both vector-based and pixel-based tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Reverse trajectory generation is not new to offline RL, and the paper lacks a discussion and experimental comparison of prior works, such as [1].\n2. The paper lacks a clear and well-reasoned explanation of the issues with previous data augmentation methods. In lines 88-90, this paper claims that previous data augmentation methods suffer from the data distribution shift, potentially leading to value overestimation. However, since all of these works use offline RL algorithms, I think data distribution shift is not the key issue. On the contrary, data augmentation is only effective when the generative model can produce samples that differ from the training data.\n3. Generated data fidelity is a more critical factor. The paper lacks a quantitative evaluation of the fidelity of generated samples.\n\n[1] Offline Reinforcement Learning with Reverse Model-based Imagination. NeurIPS, 2021." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The reverse generation method, which has been empirically verified, effectively reduces the augmentation of data in risky regions. This concept is intuitively illustrated in the diagram." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Traditional offline reinforcement learning methods often introduce conservative biases to limit exploration to familiar regions, but this can restrict an agent's ability to generalize. While recent approaches use generative models to expand offline datasets, they can overestimate synthesized data, particularly when it includes out-of-distribution samples. RTDiff is introduced to address this, which is a diffusion-based data augmentation technique that creates trajectories in reverse, moving from unknown to known states. This reverse approach reduces the risk of overestimation by ensuring the agent avoids planning through unfamiliar regions. It also supports generating longer trajectories, utilizing diffusion models effectively while maintaining reliability. RTDiff further optimizes the process with flexible trajectory length control and noise management to improve generation efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.Previous research [1] introduced a reverse data generation approach using a transition model. 
In the current work, the vanilla model is replaced with a diffusion model, yet the fundamental concept remains unchanged, limiting the overall contribution.\n\n2.The explanation relies on an intuitive diagram, but it would be more effective to demonstrate several specific cases. Identifying which states are risky. For example, some states are prone to be overestimated and easily generated by a forward model, but the reverse model effectively avoids generating them.\n\n3.Minor errors are present, such as in line 156, where \"dat\" should be corrected to \"data.\"\n\n4.Environments based on visual data should be included in the analysis.\n\n[1] Wang, Jianhao, et al. Offline reinforcement learning with reverse model-based imagination." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024rtdiff,\ntitle={{RTD}iff: Reverse Trajectory Synthesis via Diffusion for Offline Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0FK6tzqV76},\nnote={under review}\n}" }, "abstract": { "value": "In offline reinforcement learning (RL), managing the distribution shift between the learned policy and the static offline dataset is a persistent challenge that can result in overestimated values and suboptimal policies. Traditional offline RL methods address this by introducing conservative biases that limit exploration to well-understood regions, but they often overly restrict the agent's generalization capabilities. Recent work has sought to generate trajectories using generative models to augment the offline dataset, yet these methods still struggle with overestimating synthesized data, especially when out-of-distribution samples are produced. 
To overcome this issue, we propose RTDiff, a novel diffusion-based data augmentation technique that synthesizes trajectories *in reverse*, moving from unknown to known states. Such reverse generation naturally mitigates the risk of overestimation by ensuring that the agent avoids planning through unknown states. Additionally, reverse trajectory synthesis allows us to generate longer, more informative trajectories that take full advantage of diffusion models' generative strengths while ensuring reliability. We further enhance RTDiff by introducing flexible trajectory length control and improving the efficiency of the generation process through noise management. Our empirical results show that RTDiff significantly improves the performance of several state-of-the-art offline RL algorithms across diverse environments, achieving consistent and superior results by effectively overcoming distribution shift." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Reinforcement Learning", "Diffusion Model", "Reverse Synthesize" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/d2871f8fbecca7332011d381040dfe22407a2f63.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/15a8e35d680b784653f8ee42d8199420a0277013.pdf" }, "title": { "value": "RTDiff: Reverse Trajectory Synthesis via Diffusion for Offline Reinforcement Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0FbzC7B9xI
Truncation Is All You Need: Improved Sampling Of Diffusion Models For Physics-Based Simulations
main
Active
physics-based simulations;diffusion models;improved sampling
applications to physical sciences (physics, chemistry, biology, etc.)
5;5;5;6;6
3;3;4;4;4
2;3;3;3;3
2;2;2;3;3
3;2;3;3;3
5.4
3.6
2.8
2.4
2.8
0.666667
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Does s=1 mean that the model is trained as a single variational autoencoder? Or is this truncation only used for sampling? (paragraph about the training is not clear to me)\n\n- naive question for my understanding: At the beginning of Section 5.1, the authors say that they consider time series of flow field from j= 1 to T. It is not clear how the time series are handled here. Could the authors clarify this point?\n\n- How do the authors handle the high fluctuations areas of the domain? It seems that some region of the domain have a highly turbulence flow field (low pressure vortex) and this would require a more flexible model in this specific area. \n\n- Did the authors try to change the value of the initial input $x_{init}$? \n\nI look forward to reading the answers of the authors and I can change my score depending on their answers." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper reads very well and provides some good contributions to the development of diffusion models. \n\n - Although the presented approach is based on some known results (Tweetie's formula, ancestral sampling), the proposed solution seems very efficient in practice and leads to lower computational costs. 
Both idea's exploit the denoising Tweedie's formula in forward and backward sampling.\n\n - Including the truncation in the training is smart trick that eases the training. \n\n - The numerical experiments show that the presented approaches achieve better performances than traditional DDPM sampling and reduce the computational costs.\n\n- The approach seem to be very efficient in physic based simulation of flow field. I think this is a very good contribution in this specific domain." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a truncation approach and an iterative refinement process in the sampling procedure of the Denoising Diffusion Probabilistic Model (DDPM) that enable to reduce the number of function evaluations without decreasing the accuracy. The first method proposes to stop the sampling process at an earlier time point and to estimate the denoised sample using Tweedie's formula. The second method uses the forward diffusion for a given shorter noise schedule and the denoised sample is approximated using Tweedie's formula. The authors show the efficiency of the approach on the simulations of airflow field." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Although the approach seems efficient in the presented numerical experiments, there is a number of points that need to be clarified. \n\n- Tweedie's formula is well known. It seems that this has been already used in some previous works such of [Delbracio et Milanfar, 2024]. In their work, the authors provide some intermediate reconstructions through this formula. They show that by adding some stochastic steps \nthey can get better performances than state-of-the-art. I know the model is not the same but it would have been interesting to highlight the links with this work because they seem very closely related. 
May the authors comment on that point?\n\n- The results show that TS outperforms traditional DDPM by using s=1. This means that on this specific problem, there is no need to sample intermediate diffusion steps. I wonder whether this aspect is problem specific or this happens for a wider range of problems. Is this result expected? \n\n- This would have been interesting to see how this method perform against traditional DDPM sampling on image reconstruction. Indeed, DDPMs usually perform well on such problems.\n\n- The training data are deterministic sequences of flow field data. It would have been interesting to observe how the model performs on noisy dataset. \n\n- Overall, I like the idea of truncation and iterative refinement, but since there is no major theoretical contribution in this work, I would have liked to see more numerical results. The claim \"Truncation is all you need\" would have been justified if the authors had included numerical results on different applications. So far, the paper makes very good contribution in this specific domain, and proposes an interesting approach to reduce computational costs.\n\nDelbracio et Milanfar, 2024, Inversion by Direct Iteration: An Alternative to Denoising Diffusion for Image Restoration. TMLR." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None. This is original work and there are no ethical issues" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The experimental results are very good, and I hope to add more theoretical analysis. I will be happy to improve my score in subsequent discussions." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Advantages:\n\n1. As we all know, if we consider an infinite boundary heat equation (diffusion process), or consider long-term diffusion, then the usefulness of a long part of the noise addition/noise reduction behavior is not that great. The article fully considers and utilizes the diffusion behavior in a finite time (truncation), thus achieving a balance between accuracy and running time, which is a very good point.\n\n2. Truncated sampling reduces the uncertainty of the sample to a certain extent, or increases the accuracy of the sample response distribution.\n\n3. Based on the content of the appendix(in particular, part D), the experimental effect is very significant. In other words, iterative refinement even makes up for the truncated sampling to a certain extent (the useful information and samples that are truncated)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This article first analyzes DDPM and finds that after an appropriate truncation (stop diffusion), the model has high fidelity and high-efficiency sampling performance. On this basis, an iterative refinement method is introduced to further improve accuracy and long-term stability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Disadvantages:\n\n1. 
I want to know whether there is a mathematical inference for the truncation sampling standard, or whether it is completely based on experience, that is, truncation and retention (importance) interpretability.\n\n2. The purpose of refinement iteration and truncation sampling is to improve efficiency while ensuring a certain degree of accuracy. I think this requires a game. How to achieve such a balance? Is there a more rigorous mathematical explanation?\n\n3. I think experiments can increase the breadth. One is to compare with a more general SDE instead of just with DDPM (and Kohl's 2024 work). In addition, for experiments, do you consider more general PDE solutions?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "**Q1:** Given that PDE-refiner is conceptually relatively similar to iterative refinement, I am surprised to see a quite large performance difference between the two methods in Appendix D.1. 
Can the authors explain the reasons behind this large performance gap?\n\n**Q2:** The truncation of the last steps of the reverse diffusion process seems to be equivalent to a modification of the noising schedule: we can choose the noise schedule $\\beta_t$ such that the first step in the forward diffusion process has already a quite low signal-to-noise ratio (in line with the level corresponding to the last step before truncation in the reverse process), and afterwards noise is added gradually as per usual, while reducing the total amount of steps in the forward process in line with the skip percentage. Can the authors provide their thoughts on this perspective and whether or not they agree? If they agree, can they comment on why the noise schedule that is equivalent to the truncated process is a good choice for this problem setting relative to other problem settings, and in this way place their contribution in the broader context of noise schedules?\n\n**Q3:** Please comment on W1-W3." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "**S1:** The experimental setup is rigorous, comparing methods in both pointwise metrics and relevant physics-based metrics to provide complementary perspectives on their performance for three relevant datasets. Moreover, many additional results and baselines can be found in the appendix, making an extensive empirical evaluation overall.\n\n**S2:** Two methodological contributions are evaluated: truncation of the last steps of the reverse diffusion process, and iterative refinement, which optimizes the inference sampling schedule such that less denoising steps are required. Both contributions are aimed at reducing the number of function evaluations to improve computational complexity of diffusion models for physics simulations. 
This is a relevant research direction, since reducing computational complexity of computational procedures is one of the primary use-cases of neural simulation models, for which diffusion is emerging as a promising modeling approach. \n\n**S3:** The paper is well-written, and the explanations of the proposed algorithms are intuitive and easy to follow and understand. The clear structure of the text helps the reader to efficiently navigate the paper." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the application of diffusion models to physics simulations. Over the past years, neural networks have emerged as surrogate modeling approach for physics simulations, with a key use-case being computationally efficient inference. However, for this purpose, diffusion models have the drawback of requiring many function evaluations due to their iterative ancestral sampling procedure. To this end, the authors propose two contributions: (1) truncation of the last steps of the reverse diffusion process, and (2) iterative refinement, which considers a much shorter noise schedule at inference time. Both methods reduce the number of function evaluations and thereby increase sampling speeds. Moreover, the empirical results demonstrate that accuracy is generally maintained and sometimes even improved compared to standard expensive sampling procedures." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**W1:** While reading the text, I found it difficult to distill what the key differences between iterative refinement and PDE refiner are (Lippe et al., 2023). Does it have to do with the greedy optimization method of the refinement schedule (to my knowledge PDE refiner uses a fixed schedule), or details in the formulation of the diffusion process (a nonzero vs zero drift term in IR and PDE refiner respectively), or something else? 
Since both IR and PDE refiner are quite similar, it would be good if the ‘method novelty’ paragraph explicitly contrasts the two approaches and highlights their differences. Additionally, if the greedy optimization of $\\gamma$ is a core novelty relative to PDE refiner, then it would be beneficial to explain this more elaborately in the main text rather than the appendix, since it would be a key aspect of one of the contributions in this case.\n\n**W2:** One of the goals of the paper is to show that the proposed methods close the gap between the diffusion models and deterministic baselines. However, most of the results in the main text (both tables and plots) focus only on diffusion models. It would be relatively straightforward to also show the results of one or two deterministic methods that are considered by the authors in part of the plots and tables, for example in Figure 2 and Table 2. This would help the reader to get a better understanding of the tradeoffs of existing diffusion-based approaches, deterministic approaches, and the proposed methods without taking additional space in the paper.\n\n**W3:** The conditioning on the autoregressive step size (j in Sec. 2 of the paper) is already introduced in Gupta et al (2022), and as such cannot be claimed as a contribution of the paper (currently point 1 of the contributions listed in the introduction). Since this is not a core point in the rest of the paper, it seems that this can straightforwardly be removed from the list of contributions in the introduction without affecting the rest of the work and the core contributions significantly.\n\n**References:**\n\nGupta, J. K., & Brandstetter, J. (2022). Towards multi-spatiotemporal-scale generalized pde modeling. arXiv preprint arXiv:2209.15616.\n\nLippe, P., Veeling, B., Perdikaris, P., Turner, R., & Brandstetter, J. (2023). Pde-refiner: Achieving accurate long rollouts with neural pde solvers. Advances in Neural Information Processing Systems, 36." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "See above." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Could you theoretically or intuitively clarify the effectiveness of the pre-trained diffusion model in the IR sampling procedure at noise level $t = \\gamma $? The distribution $x_{init}$ at noise level $t = \\gamma $ appears to differ from the distribution $x_0$ at the same level $t = \\gamma $. Additionally, in Equation 6, is it ensured that the error between the final output and the $x_0$ remains sufficiently small, such that $E[‖x _0^N - x_0 ‖_2 ]<\\epsilon $?\n\n- Could you also clarify (experimental support would be helpful) line 288 (2), which states that IR sampling does not require data consistency to $\\hat{x}_0$? I mean why the proposed method does not require the data-consistency? Enforcing data consistency to $\\hat{x}_0$ could be plugged in after line 7 in Algorithm 2 to improve accuracy in a single iteration, without compromising the number of iterations needed. Also, related to the above question, data consistency could help reduce the error towards zero, such as [1] and [2]. \n\n[1] A physics-informed diffusion model for high-fidelity flow field reconstruction, 2023.\n\n[2] Diffusionpde: Generative pde-solving under partial observation, 2024." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Interesting topics – Generative diffusion models as surrogate model for fluid simulations.\n- Clear motivation – Reduce computation costs in the generative process using a novel reverse sampling approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Truncated Sampling Models (TSMs) and Iterative Refinement (IR) to improve the efficiency and fidelity of Denoising Diffusion Probabilistic Models (DDPMs) for fluid simulations by enabling reduced steps sampling through truncation of the diffusion process. These methods significantly reduce inference time and improve stability over long rollout horizons for turbulent flow and airfoil simulations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper lacks clarification of superiority of the proposed methods over other surrogate models, such as neural operators which are currently the most widely used ML-based surrogate models in fluid dynamics for speed and accuracy. Given that a primary contribution of this paper is reducing time costs, a more thorough comparison with advanced neural operators – either highlighting the proposed method’s improved accuracy at similar time costs or its time efficiency at comparable accuracy – would strengthen the argument. However, there are few comparisons with neural operators in the experiments in main text; although Unet is included, more advanced neural operators should be included. Additionally, the proposed method does not appear to clearly outperform Unet.\n- The title may mislead some readers, as “physics-based simulations” implies a broad range of applications, while the paper is mostly on fluid dynamics. 
To improve clarity, I recommend replacing “physics-based simulations” to “Fluid Dynamics Simulations”. Alternatively, the authors could clarify whether they intend to generalize their approach to other physics-based simulations beyond fluid dynamics or provide examples of how the method could be applicable to other physics domains." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. **Regarding W1** - I’d be curious to see how the methods perform on dynamical systems with more complicated nonlinear dynamics, for example the Kuramoto-Sivashinsky equation used in PDE-Refiner (Lippe et al. [3]). That is a fourth-order non-linear equation where correctly capturing the high frequencies seems to be more important than in other settings, so it would be interesting to see how you compare there with benchmarks such as PDE-Refiner [3] (which can also be interpreted as a refinement of an MSE-trained one-step prediction). \n2. **Regarding W2** - Do you have any intuition why these methods outperform DDPM and whether the DDPM baseline could be improved?\n3. **Regarding W4** - Could you provide some experiments that test the long rollout performance of these sampling methods vs. traditional ones? Including frequency spectra of generated states would also help.\n4. 
**Regarding Minor 2** - It seems that in general the optimum $j$ for these methods is between [2, 4], but have you noticed any significant patterns? Were there differences between interpolation and extrapolation tasks?\n5. **Eq (5).** I am not sure I understand this equation. I will omit the $\\theta$ in the $p_{\\theta}$ subscript because the equations do not render correctly. If $p^T(x_T, \\mathbf{x}_0, j) = \\mathbf{x}(j \\cdot \\delta t)$, then wouldn’t $\\mathbf{x}(2j \\cdot \\delta t) = p^T(x_T, \\mathbf{x}(j \\cdot \\delta t), j) = p^T(x_T, p^T(x_T, \\mathbf{x}_0, j), j)$ (i.e., we still start from white noise $x_T$, but we now condition on the output of the previous step)? In my mind $\\mathbf{x}(\\tau_f) = p^T(x_T, p^T(...p^T(x_T, \\mathbf{x}_0, j)...), j)$, but maybe I didn’t interpret the equation correctly.\n6. When comparing the results in Table 5 vs. Tables 6 and 7 - Shouldn’t the metrics corresponding to, for example, Fturb TSM T80 s =0.75 ($j=2$) from Table 5 ($3.63 \\pm 1.95$) be the same as in TSM T80, s=0.75 with optimal $j=2$ ($3.43 \\pm 3.03$) Table 7? What’s the difference between these settings?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. **Relevant topic.** Lately, there has been increasing interest in modelling dynamical systems with diffusion models due to their probabilistic nature. However, many works (Kohl et al. [1], Shysheya et al. [2], Lippe et al. [3]) acknowledge that diffusion models tend to be computationally costly, and techniques that reduce this cost would be greatly beneficial. This is exactly the problem this paper aims to address.\n2. **Clear distinction from other works.** The paper clearly delineates its contributions from the already existing work, and how the proposed sampling techniques differ from other approaches.\n3. 
**Good experimental evidence.** The paper provides good empirical evidence, with experiments on three diverse datasets, and using a wide range of metrics.\n4. **Well-structured, clear writing.** Overall, I found the structure and writing clear.\n\n[1] Kohl, G., Chen, L., & Thuerey, N. (2023). Benchmarking Autoregressive Conditional Diffusion Models for Turbulent Flow Simulation.\n\n[2] Shysheya, A., Diaconu, C., Bergamin, F., Perdikaris, P., Hern'andez-Lobato, J.M., Turner, R.E., & Mathieu, E. (2024). On conditional diffusion models for PDE simulations.\n\n[3] Lippe, P., Veeling, B.S., Perdikaris, P., Turner, R.E., & Brandstetter, J. (2023). PDE-Refiner: Achieving Accurate Long Rollouts with Neural PDE Solvers. ArXiv, abs/2308.05732." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the high computational requirements of existing diffusion-based techniques for modelling dynamical systems. They propose two sampling techniques that lead to good sample quality with only a few NFEs. The first one requires modifications to the training process and is performed by truncating the diffusion process close to the clean data. The second one is compatible with pre-trained DDPMs and proposes an iterative refinement based on Tweedie’s formula. The paper provides extensive experimental evidence on three datasets: incompressible and compressible turbulent flow (2D) and airfoil flow simulation (3D)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Unclear how the method generalises to other settings.** I acknowledge that the aim of the paper is to study the efficacy of the proposed sampling methods in the context of dynamical systems. 
As the authors mention, the fact that they lead to good results is probably due to:\n- The characteristics of the data distribution—states with fairly coarse resolutions, where the high frequency information has been lost during downsampling.\n- The task considered—predicting next step (forecasting) distribution, which is predominantly unimodal.\n \n I’d be curious to know whether the sampling methods remain applicable \n - for other tasks that might require modelling multimodal distributions: e.g. some sort of inverse problem (reconstruct field based on sparse, partial observations, compatible with multiple solutions), which is also a task of interest in dynamical system modelling, \n - or for other datasets: potentially still PDEs but not as coarsened and with more complicated nonlinear behaviour, or images. \n\n One could argue that Air_multi requires sampling from a more complicated distribution, and there the improvements are not as pronounced as in other settings. Perhaps you could include a brief discussion on this, or a preliminary experiment on a more complex PDE dataset, such as Kuramoto-Sivashinsky (see **Q1**)?\n2. **Counter-intuitive comparison to DDPM.** Maybe this is not necessarily a weakness, but I find it counter-intuitive that these sampling methods outperform DDPM, given that DDPM (or at least the continuous-time frame formulation) has stronger theoretical foundations (Similarly to how DDIM is posed as a framework where you can trade off computation for sample quality, not gain in both). I would have expected the same accuracy with significantly fewer NFEs to be possible, but not both. Is it possible that the DDPM model could be further tuned to achieve similar performance and maybe the DDPM baselines are not optimal? Maybe this is where a comparison with EDM would have been beneficial, as it provides a more principled approach of setting up diffusion models.\n3. 
**Lack of theoretical guarantees.** While the methods seem effective in practice, the paper does not provide any theoretical guarantees. I realise this might be hard to derive, but this makes the applicability of the methods more ad-hoc. This also reflects in the empirical investigation, where there is no clear recipe for what works best and how one should choose the hyperparameters optimally.\nAlso when making statements such as “However, we assert that\nour methods will consistently improve over ancestral sampling, as demonstrated in our experiments.” - This was only shown in three experiments, and the paper does not contain guarantees that this would always hold, so I would avoid over-generalising.\n4. **Lack of analysis of the stability of longer rollouts.** The paper provides several metrics to analyse the performance of the sampling methods, but none provides intuition/results about how the metrics evolve in time for the transient datasets (Tra and Fturb) (e.g. per-time-step MSE or per-time-step correlation). In particular, I’d be interested to compare the performance of the benchmarks to your methods on rollouts longer than what the model has been trained on. (This could, for instance, be tested on Tra_long from Kohl et al. [1]).\n \n I think that one potential weakness of these sampling schemes is that they lose more of the high frequency information than the traditional sampling schemes. Maybe this doesn’t affect the short rollouts significantly but it might negatively impact the accuracy of longer term rollouts (see Lippe et al. [3]). It would also be good to check that if extended significantly beyond the training range, the proposed sampling methods still generate physically plausible states, and outperform the baselines.\n5. **Lack of a comparison to EDM.** While I agree that a comparison to, for example, distillation techniques is outside the scope of the paper, I think EDM is relevant as a baseline. 
The fact that it is “designed to handle more complex stochastic data with multimodal distribution” does not mean it is not relevant for cases that do not exhibit much stochasticity. And it shouldn’t incur a different computational cost at training time. I think a comparison to EDM would be very useful to the community to figure out what the fastest and most accurate way to set up diffusion models for dynamical systems is. If the methods proposed here outperform EDM, this makes the paper stronger. If they don’t, I think this would also be a valuable result, potentially implying that EDM is a robust technique (regardless of data distribution) that should be used as a “first thing to try” as opposed to spending time and resources on hyperparameter tuning of more ad-hoc techniques.\n Maybe the authors could include a comparison to EDM on one or two experiments?\n6. **Unclear experimental setup.** I found it hard to figure out some experimental details in certain places. I am aware these details exist in other papers (Kohl et al. [1]), but for ease of interpreting the results, it would help to include some more details, potentially as a brief table in the appendix which summarises the most important dataset characteristics. For example: \n - Tra - What is the Mach number range of the training trajectories? Is it also Ma $\\in [0.53, 0.63] \\cup [0.69, 0.90]$ as in Kohl et al.?\n - Tra - How many trajectories are there per Mach number?\n - Fturb - I am slightly confused about the number of states within each trajectory for this dataset. You mention that each simulation contains 51 temporal states, but do you just consider 30 out of these for the test results? And what do you mean by “AR sampling is employed for $R = 30$ timesteps … for two sequences per $Re$?”\n - Fturb - you are feeding in the $Re$ number as conditioning information to the model as in Kohl et al., right?\n\n**Minor**\n\n7. 
**Comparison to DDIM (L306)** - You say that “IR should supersede the deterministic DDIM sampling regarding accuracy and NFEs” due to its stochastic nature. While I agree that stochastic sampling is beneficial because it can correct previous errors in sampling, Karras et al. [4] mention that, in practice, the situation is more complex because approximating the extra Langevin term introduces error in itself (see Section 4 Stochastic sampling). Thus, I would not say there is any guarantee that IR would supersede DDIM in all scenarios.\n8. **Lack of discussion on $j$.** I find it interesting that the optimum $j$ value varied so much between methods, as shown in Tables 6 and 7, but you do not include any discussion about this. Do you have any insight about why this might be the case?\n9. **Small typos**, such as L291 “to be evaluated”, L776 “presnted”, L772 “where” missing at the beginning of the line, L819 “allow” rather than “allows”, L840 “are not needed” etc.\n\nOverall, I think the paper is clearly written and structured, and generally presents convincing empirical evidence. However, I think its quality could be significantly improved by including experiments on longer rollouts, a comparison to EDM, and potentially clarifying the regimes in which the techniques are effective (with the inclusion of negative results if necessary).\n\n[4] Karras, T., Aittala, M., Aila, T., & Laine, S. (2022). Elucidating the Design Space of Diffusion-Based Generative Models. ArXiv, abs/2206.00364." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose an early truncation algorithm and an iterative refinement method to let diffusion models produce highly accurate results with low NFEs." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024truncation,\ntitle={Truncation Is All You Need: Improved Sampling Of Diffusion Models For Physics-Based Simulations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0FbzC7B9xI},\nnote={under review}\n}" }, "abstract": { "value": "State-of-the-art Denoising Diffusion Probabilistic Models (DDPMs) rely on an expensive sampling process with a large Number of Function Evaluations (NFEs) to provide high-fidelity predictions. This computational bottleneck renders diffusion models less appealing as surrogates for the spatio-temporal prediction of physics-based problems with long rollout horizons. We propose Truncated Sampling Models, enabling single-step and few-step sampling with elevated fidelity by simple truncation of the diffusion process, reducing the gap between DDPMs and deterministic single-step approaches. We also introduce a novel approach, Iterative Refinement, to sample pre-trained DDPMs by reformulating the generative process as a refinement process with few sampling steps. Both proposed methods enable significant improvements in accuracy compared to DDPMs with NFEs $\\leq$ 10 on a diverse set of experiments, including incompressible and compressible turbulent flow and airfoil flow uncertainty simulations. Our proposed methods provide stable predictions for long rollout horizons in time-dependent problems and are able to learn all modes of the data distribution in steady-state problems with high uncertainty." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "physics-based simulations", "diffusion models", "improved sampling" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/487ead27e5e1a14dabe2352bcd3f95c789c0c375.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/ede03c067ebcd603195a870e18e6b1be7856a020.zip" }, "title": { "value": "Truncation Is All You Need: Improved Sampling Of Diffusion Models For Physics-Based Simulations" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0Fi3u4RCyU
Evolve: Evaluating and Optimizing LLMs For Exploration
main
Active
Large Language Model;Exploration
foundation or frontier models, including LLMs
5;5;6;8
4;4;4;4
4;2;3;4
2;2;3;3
4;2;3;4
6
4
3.25
2.5
3.25
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see the weakness part.\n1. Given that the results in Table 1 suggest that the use of Algorithmic Guidance (AG) does not lead to consistent improvements in MAB scenarios, could you provide further insights into the specific conditions under which SH and AG are most effective (especially compared with UCB or LinUCB)? \n2. Since the results in Figure 4 indicate that in-context demonstration performs better in some cases (e.g., Bernoulli Video and Summarized History) while fine-tuning is more effective in others (e.g., Bernoulli Clothes and Raw History), could you provide further analysis to help guide the selection of the most appropriate method in practical applications? Besides, could you clarify the numeric similarities observed in Figure 4?\n3. How well do the proposed methods generalize to domains with much larger action spaces, such as real-world recommendation systems that involve thousands of items or more complex decision-making problems where exploration becomes more challenging due to the increased task size and complexity?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. 
The paper contributes to a relatively underexplored area by focusing on in-context exploration for LLMs in multi-armed bandit and contextual bandit settings. While LLMs are traditionally used for predictive tasks, this work broadens their application to optimal decision-making under uncertainty.\n2. The introduction of BanditBench provides a structured benchmark for evaluating LLMs in decision-making tasks that require exploration and exploitation. \n3. The proposed methods, including inference-time algorithmic guidance and algorithmic distillation, are well-motivated." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores the ability of large language models to perform optimal decision-making under uncertainty through in-context exploration in multi-armed bandit and contextual bandit settings. This work introduces BanditBench, a comprehensive benchmark suite designed to evaluate LLMs in various bandit tasks. They propose two approaches to make use of bandit algorithms: (1) inference-time algorithmic guidance using established algorithms like UCB and (2) algorithmic distillation, where optimal behavior from algorithms is distilled into LLMs through few-shot demonstrations or fine-tuning. They also show the influence of different factors by conducting the ablation experiments." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While the use of Summarized History (SH) and Algorithmic Guidance (AG) to enhance the exploration capabilities of LLMs is an intriguing direction, it is important to note that the results in Table 1 indicate that the application of AG in MAB scenarios does not yield consistent improvements and that its performance remains relatively low compared to traditional bandit algorithms (UCB, LinUCB). Additionally, employing AG introduces extra computational overhead. 
A more detailed discussion of the effects of AG would be beneficial for understanding its role more clearly.\n2. The experimental analysis shows mixed results, especially in approaches for knowledge distillation with In-context Demonstration and Optimal Behavior Fine-Tuning for different model sizes and task difficulties. Specifically, in Figure 4, the results across various tasks and methods exhibit oddly similar numerical values (e.g., 0.487, 0.636, 0.267). A deeper investigation into the reasons behind these results could enhance the applicability of the proposed approaches in real-world scenarios.\n3. The experiments are primarily focused on two specific domains (clothing and movie recommendations) with relatively small action spaces. It's unclear how well the proposed methods generalize to domains with much larger action spaces (e.g., thousands of items in real-world recommendation systems) or other decision-making problems where exploration could be more challenging due to the size and complexity of the task." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. Why is OFT in Figure 2 present only for Gemini 1.5 Flash?\n2. Any idea how the LLMs perform in larger action spaces? I can imagine that many real-world applications go well beyond K=30, and any discussion on these scaling laws would be very helpful. 
This may not be intuitive as we would need to deal with issues such as limited context window and whether LLM can correctly synthesize the information from larger contexts.\n3. Based on Figure 5, Gemma models perform terribly in exploration, even with all the techniques introduced in the paper. Do you have any explanation/hypotheses on why this is the case? Is it because of the model sizes?\n4. How practical is it to use LLMs for such explicit exploration? If you have explicit actions, it seems easier to use RAG with UCB/Thompson Sampling baked into the external retrieval system, resulting in optimal exploration." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper is well-structured and easy to read. It extends the idea of Krishnamurthy et al. (2024) to contextual bandits, which is an important step for many practical applications.\n\nThe LLM evaluation methodology is sound and uses the MovieLens dataset, which I find a good fit for LLM exploration. I especially like the functional interpretation in Section 6, which allows us to compare LLM exploration capabilities to the established bandit algorithms, which clearly shows the LLMs are (unsurprisingly) lagging behind. This gives the paper a much stronger position, not overselling its ideas and showing the areas needed for improvement.\n\nOverall, I think there are a lot of novel ideas, and provided the authors release the source code, the ICLR community can build on this.\n\n---\nKrishnamurthy, Akshay, et al. \"Can large language models explore in-context?.\" arXiv preprint arXiv:2403.15371 (2024)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors develop the BanditBench benchmark, which evaluates LLMs' abilities to explore and converge to optimal actions through the multi-armed bandit framework. 
They comprehensively evaluate the suite of Gemma and Gemini 1.5 models and propose two techniques to boost the LLMs' exploration abilities further." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "In MAB, I would like to see a setting with variable sigma for each action, as the exploration problem for the LLMs might get easier when all of the actions share the same variance.\n\nI find the MovieLens dataset very simplified if the maximum number of actions is set at K=30 (see questions)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "In-context reinforcement learning is an important and interesting problem, and multi-armed bandits & contextual bandits are an important building block in this direction. The authors propose several mitigations to improve the ability of LLMs to explore in these settings. Moreover, the paper is well-written and the multi-armed bandit experiments are comprehensive." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This submission studies the problem of in-context exploration, where an LLM interacts with a bandit environment, and its history of observations and interactions with the environment are given in-context. The LLM agent then decides its next action based on this given context. Two forms of history are considered: raw history, in which the entire history is given in-context and summarized history, where summary statistics are pre-computed and given in-context instead. \n\nThe authors call their framework BanditBench. They consider both stochastic multi-armed bandit and contextual bandit instances. For multi-armed bandits, they consider two action descriptions: choosing between different videos and different clothes. They also consider two reward distributions: Gaussian and Bernoulli. For contextual bandits, they construct their instances from the MovieLens dataset. The MovieLens dataset contains 10,000 real users’ movie ratings. In the constructed contextual bandit instance, the goal is to recommend a personalized movie that the specific user seen at the current round will enjoy. The LLM is given textual features, as well as numerical features taken from a low-rank approximation of each user’s rating matrix as the context in each round. \n\nThe authors propose two mitigations to improve the exploratory behavior of LLMs in bandit tasks. Both methods leverage the behavior of optimal bandit algorithms. For the purposes of this submission, the optimal bandit algorithm considered is UCB for multi-armed bandits and LinUCB for contextual bandits. In inference-time algorithmic guided support (the authors’ first proposed mitigation), the LLM is given the explore/exploit components of UCB/LinUCB at each time step. (E.g. for UCB, this is the empirical average reward and the ‘exploration bonus’ for each arm.) 
For algorithmic distillation (the authors’ second proposed mitigation), UCB/LinUCB trajectories are given either in-context or via fine-tuning. \n\nThe authors empirically evaluate Gemma-2B, Gemma-9B, Gemini 1.5 Flash, and Gemini 1.5 Pro on 16 multi-armed bandit and 2 contextual bandit tasks. They compare the performance of different models via pariwise win rate. They find that, perhaps surprisingly, few-shot learning boosts Flash’s performance while hurting Pro’s. They also find that fine-tuning significantly improves performance over few-shot learning, and leveraging inference-time support significantly improves performance across all models. Various ablations are also performed." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the multi-armed bandit experiments are thorough, their novelty is somewhat limited as (as the authors point out), Krishnamurthy et al. 2024 study a very similar multi-armed bandit setting. While the multi-armed bandit results in this submission are more comprehensive, their findings are similar to Krishnamurthy et al.\n\nThe authors do include contextual bandit experiments (which are not present in Krishnamurthy et al.), but they are less comprehensive than the multi-armed bandit experiments. \n\nFinally, I am not fully convinced by the authors proposed mitigations. If we give LLMs things which make it easier for them to compute an upper-confidence bound, are we testing the LLMs’ ability to explore, or their ability to implement UCB? One reason why in-context exploration is interesting is because of the complex structure of real-world decision-making tasks. While it is natural to test LLMs’ exploration abilities on simple multi-armed bandit and contextual bandit tasks, we already have optimal algorithms for these domains and so deploying LLMs in such simple settings is not the end goal. 
Given that UCB is often suboptimal in structured bandit tasks beyond the two studied in this work, do you believe your proposed mitigations will extend to more complicated tasks?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Is \"Optimal Behavior Fine-Tuning\" what is known in the literature as Behavioral Cloning? If so, please change the name in your paper. It can be confusing to a reader \n\n- Can the applicability of BanditBench be extended to other decision-making scenarios beyond bandit settings? Can you add some discussion about it in the paper (if you find some space, otherwise in the appendix)? I feel like recently LLM agents in more complex domains such as MDPs are very relevant and may be very useful in many real-world applications. Notice however that I believe that a BanditBench is absolutely needed, even if it is a simplified MDP version, because it allows to analyze more carefully the exploration-exploitation trade-off in LLM bandits." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Addresses an important area of LLMs in decision-making tasks: this paper faces a very timely topic. LLM agents are an important research direction that has recently seen a surge in popularity. 
New research in this area is fundamental in order to better understand the behavior of LLMs when they face decision-making problems under uncertainty. \n\n- New benchmark: The paper introduces BanditBench, which is a novel benchmark for evaluating LLM exploration abilities. A benchmark in this research area is fundamental. Many papers in this area have different experimental settings. This makes it hard to compare them and for the whole research community to make reliable progress. For this reason, a benchmark on LLM agents is fundamental.\n\n- Empirical evaluation: The paper also conducts comprehensive empirical evaluations and ablation studies on the proposed benchmark. I think that these results are interesting for the research community." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper examines the ability of large language models (LLMs) to perform decision-making tasks. In particular, it is focused on Multi-Armed Bandit (MAB) and Contextual Bandit (CB) problems. The paper introduces BanditBench, a benchmark suite for evaluating large language models in decision-making tasks within bandit environments. It also proposes two approaches to enhance LLM exploration: inference-time algorithmic guided support and algorithmic distillation through in-context demonstrations and fine-tuning using synthetic data generated from optimal algorithms. Results show interesting behavior of LLM-agents in bandit tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Lack of novelty in some of the contributions: While I believe that BanditBench is a great contribution, the other claim of this paper is: \"[...] we propose methods to enhance LLM’s decision-making capability by leveraging optimal algorithms, including algorithmic guided inference-time support and algorithmic distillation approach\". The proposed approaches, however, seem to lack of novelty. 
\nIn particular, the technique that the paper calls \"Optimal Behavior Fine-Tuning\" seems to be exactly what is known in the literature as Behavioral Cloning. \"In-Context Few-Shot Demonstration\" instead is a sort of in-context behavioral cloning. \n\nDid not influence the score, but I feel that it may be useful to the readers:\n- Related work: In this paper, the authors analyze LLM agents' performance in decision-making and how they deal with uncertainty and exploration. There are some recent papers in this area that feel very relevant: \n - Controlling Large Language Model Agents with Entropic Activation Steering, Rahn et al., arXiv 2024. This paper investigates exactly the bandit scenario with LLM agents and tries to improve exploration with activation steering using the entropy at the representation level. \n - On the Importance of Uncertainty in Decision-Making with Large Language Models, Felicioni et al., TMLR 2024. Also this paper studies LLM agents in the (contextual) bandit scenario, but it does it by creating a new final layer on top of the pre-trained LLM and uses various approaches to approximate the Bayesian posterior to implement Thompson Sampling and improve the exploration capabilities of the LLM agent." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We measure LLMs' (in)ability to make optimal decisions in bandits and evaluate a set of strategies to train LLMs to explore." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024evolve,\ntitle={Evolve: Evaluating and Optimizing {LLM}s For Exploration},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0Fi3u4RCyU},\nnote={under review}\n}" }, "abstract": { "value": "Despite their success in many domains, large language models (LLMs) remain under-studied in scenarios requiring optimal decision-making under uncertainty. 
This is crucial as many real-world applications, ranging from personalized recommendations to healthcare interventions, demand that LLMs not only predict but also actively learn to make optimal decisions through exploration.\nIn this work, we measure LLMs' (in)ability to make optimal decisions in bandits, a state-less reinforcement learning setting relevant to many applications. We develop a comprehensive suite of environments that include both context-free and contextual bandits of varying task difficulties to benchmark LLMs' performance. Motivated by the existence of optimal exploration algorithms, we propose efficient ways to integrate this algorithmic knowledge into LLMs: by providing explicit algorithmic guided support during inference; and through knowledge distillation via in-context demonstrations and fine-tuning, using synthetic data generated from these algorithms.\nImpressively, these techniques allow us to achieve superior exploration performance with smaller models, surpassing larger models on various tasks. We conducted an extensive ablation study to shed light on the different factors, such as task difficulty and data representations, that influence the efficiency of LLM exploration. Additionally, we provide empirical measurements on the convergence rate of different exploration strategies introduced." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large Language Model", "Exploration" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/2d007aaa8021bff3c7f3b9966cec807be1776458.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Evolve: Evaluating and Optimizing LLMs For Exploration" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0FxnSZJPmh
Physics-Informed Deep Inverse Operator Networks for Solving PDE Inverse Problems
main
Active
Inverse Problems;Stability;Operator Learning;Physics-Informed Machine Learning
neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)
3;5;5
5;3;3
2;2;2
2;2;2
2;3;3
4.333333
3.666667
2
2
2.666667
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Questions about experimental results:\n - What are the number of parameters of each of the models in Table 1?\n - Could the authors provide a sensitivity analysis showing how performance changes as the relative weighting between physics and data losses is varied? This would provide valuable insight into the method's robustness.\n - How does PI-DION compare to other methods for solving inverse problems, e.g. Neural Inverse Operators [1]?\n - Any explanation about why the performance hit between supervised and unsupervised PI-DION is larger for Darcy Flow and Helmholtz equation than for reaction-diffusion?\n\n- How limiting is the assumption that there exists stability estimates for the inverse problem?\n- How well do the theoretical bounds from Theorems 2, 3 match the empirical results of Table 1 (reaction-diffusion and Helmholtz)?\n\n1. Neural Inverse Operators for Solving PDE Inverse Problems" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper engages with an important problem in SciML, learning to solve inverse problems based on physics losses without additional training data.\n- The theoretical results are quite interesting. 
The authors extend standard stability estimates for inverse problems to the operator learning setting. Promisingly, the theorems apply to the reaction-diffusion equation and the Helmholtz equation, two standard benchmarks in the literature.\n- The proposed method is simple and presented clearly and generally.\n- The empirical results are promising. On three standard benchmarks, the authors demonstrate SOTA performance of supervised learning and near-SOTA of unsupervised learning, compared to supervised DeepONet and FNO." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose Physics-Informed Deep Inverse Operator Networks (PI-DIONs), a novel architecture for solving PDE inverse problems without requiring labeled data. Theoretically, the authors extend stability estimates from traditional inverse problem theory to the operator learning setting, and prove universal approximation theorems for PI-DIONs. Empirically, the authors validate their proposed approach through experiments on reaction-diffusion equations, Helmholtz equations, and Darcy flow, achieving SOTA performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The main weakness of the paper is that the empirical results, although promising, are relatively limited and could benefit from some clarification:\n - In Table 1, PI-DION in the supervised learning setting (with 1k training examples) is shown to outperform two different DeepONets and FNOs. However, it's a bit unclear from the paper why this is true, and additional clarification about this would be helpful. Is there a difference in the model architecture / training objective / optimizer between the DeepONets and PI-DION in the supervised setting?\n - See questions for more." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. DeepONet and FNO are used for forward problems traditionally, how did they deal with inverse problems in your experiments?\n2. How is the labeled training target f mentioned in line 399 used? The loss for target f is absent in line 152." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The integration of physics-informed losses into an inverse problem framework based on operator learning is novel, and in principle PI-DIONs can solve the inverse problems (at least in scenarios mentioned in experiments) fast and without the need for labeled data.\n2. Theoretical analysis of the stability estimates is provided." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes an architecture called Physics-Informed Deep Inverse Operator Networks (PI-DIONs), which can learn the solution operator of PDE-based inverse problems without labeled training data. The architecture of PI-DIONs is based on DeepONet, and trained with both the physics-infomred loss and data reconstruction loss. The stability estimates established in the inverse problem literature are extended to the operator learning framework. 
Experiments are conducted to demonstrate the effectiveness of PI-DIONs in learning the solution operators of the inverse problems without the need for labeled data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Line 243, \"where the term ∥f − f^\\star∥L2(Ωm) in the righthand side\", there is no such term there. Please clarify the equation in line 242 and include all terms on the right-hand side of the equation. \n2. It seems that the input to the reconstruction and inverse branch networks is fixed in shape, corresponding to the partial measurement with given geometry. The observed data in PINNs can have variable count and locations. Please discuss how PI-DIONs might be adapted to handle variable measurement geometries and if there are any limitations on the types of measurement setups it can handle. \n3. In the experiments, PI-DIONs are compared with purely data-driven DeepONet and FNO, which both did not take physics information into account. If possible, please include comparisons with PINNs in the experiments, since both your PI-DIONs and PINNs are physics-informed methods for inverse problems.\n4. The simultaneous training of physics-informed losses for 1000 samples is a difficult task (similar to train 1000 PINNs simultaneously). I am curious about the training difficulties encountered. Please provide specific details on training time, hardware used, and any convergence challenges encountered. If possible, please also include an ablation study on the effect of sample size on PI-DIONs' performance since smaller sample size may lead to easier optimization.\n5. The theoretical analysis on stability estimate is extended from existing key results that considered the single element case. \n6. Please provide a clear definition of u in line 152 and describe its relationship with partial measurement. In line 456, it is better to write \"f(x,y) = 100x(1 − x)y(1 − y) \", so does line 450. 
\n\nConsidering the above weaknesses, I give a score of 3 to the current version of this paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper provides a solid theoretical foundation for the proposed PI-DIONs.\n2. The proposed method demonstrates practicality and efficiency in addressing PDE-based inverse problems without the need for labeled data." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Physics-Informed Deep Inverse Operator Networks (PI-DIONs) for solving PDE-based inverse problems without the need for labeled data. The paper extends existing stability estimates from inverse problem literature to the operator learning framework, ensuring the robustness and generalizability of PI-DIONs across the entire function space and domain." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The contribution lacks novelty. The architecture relies on relatively simple components, such as CNNs and MLPs for the branch and trunk networks. It doesn't introduce significant advancements beyond well-established methods.\n2. 
The baselines used for comparison, such as DeepONet and FNO, are somewhat dated. The paper would benefit from comparisons with more recent and state-of-the-art methods to better demonstrate the model's competitiveness.\n3. The experimental evaluation is limited in range. Conducting experiments on a broader range of benchmarks would strengthen the validation of the proposed method's effectiveness across diverse problems." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a novel architecture called Physics-Informed Deep Inverse Operator Networks (PI-DIONs), which can learn the solution operator of PDE-based inverse problems without any labeled training data." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024physicsinformed,\ntitle={Physics-Informed Deep Inverse Operator Networks for Solving {PDE} Inverse Problems},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0FxnSZJPmh},\nnote={under review}\n}" }, "abstract": { "value": "Inverse problems involving partial differential equations (PDEs) can be seen as discovering a mapping from measurement data to unknown quantities, often framed within an operator learning approach. However, existing methods typically rely on large amounts of labeled training data, which is impractical for most real-world applications. Moreover, these supervised models may fail to capture the underlying physical principles accurately. To address these limitations, we propose a novel architecture called Physics-Informed Deep Inverse Operator Networks (PI-DIONs), which can learn the solution operator of PDE-based inverse problems without any labeled training data. We extend the stability estimates established in the inverse problem literature to the operator learning framework, thereby providing a robust theoretical foundation for our method. 
These estimates guarantee that the proposed model, trained on a finite sample and grid, generalizes effectively across the entire domain and function space. Extensive experiments are conducted to demonstrate that PI-DIONs can effectively and accurately learn the solution operators of the inverse problems without the need for labeled data." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Inverse Problems", "Stability", "Operator Learning", "Physics-Informed Machine Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/ea85cba7e56622565da5179bf4477181c9a32476.pdf" }, "presentation": null, "primary_area": { "value": "neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/f463f0085c22eae21e25bb16b9515703ba69db06.zip" }, "title": { "value": "Physics-Informed Deep Inverse Operator Networks for Solving PDE Inverse Problems" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0G6rRLYcxm
Maximum Next-State Entropy for Efficient Reinforcement Learning
main
Active
Deep Reinforcement Learning; Maximum Entropy Reinforcement Learning
reinforcement learning
3;5;5;6
3;5;3;3
1;3;3;3
2;2;2;2
1;3;3;4
4.75
3.5
2.5
2
2.75
0.132453
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See questions in Weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The experiments cover multiple continuous control tasks, including complex environments like robotic arm control. The results show that MNSE outperforms traditional maximum entropy reinforcement learning methods and other reward-based exploration strategies in these tasks. This indicates the significant potential of the MNSE method in practical applications.\n\n\n2. The paper provides rigorous theoretical analysis and demonstrates its effectiveness, which is significant for advancing research and development in the field of reinforcement learning.\n\n\n3. The paper is written in a clear and concise manner. This helps readers better understand and grasp the core ideas and technical features of the method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This article presents a new reinforcement learning method called Maximum Next State Entropy (MNSE) which optimizes next-state entropy through a reversible action mapping layer. 
MNSE shows better performance than existing methods in complex environments with nonlinear actuators and emphasizes the importance of appropriate model and parameter settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Given that MNSE relies on the accurate estimation of the dynamic model, how do you ensure the accuracy of these estimations and avoid overfitting? \n\nAdditionally, could you provide guidance on how to reasonably select the hyper-parameters to optimize the algorithm's performance?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. What is the advantage of using the function $f$ to increase the gap (and thus control the next state entropy), compared to simply use as intrinsic reward the log likelihood of the inverse dynamics model (and choose f as an identity function, such that : actions = inner actions) in SAC? Similarily, why not simply learning a forward model of the MDP, and using the log likelihood of that model as intrinsic reward, to enforce the entropy of next states? \n2. Could the authors clarify the different parametric functions at hand? What is the advantage of the custom transformation in section 5.3 instead of a normalizing flow?\n3. A discretized multinomial distribution is used for the inverse dynamics model. 
What is the justification for that instead of a normalizing flow (or auto-encoder + ELBO for learning) and how is it limiting in practice?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. The problem at hand is very important to the RL community.\n2. The approach is novel, the authors introduce a new promising intrinsic reward bonus." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a new maximum entropy reinforcement learning algorithm where the entropy of the next state is enforced while learning the policy. First a particular policy parameterization is used. Inner actions are first sampled according to a parameterized inner policy (i.e., a parameterized distribution from states to features, called inner actions) and the actions are transformations of these inner actions (piecewise linear in practice, such that the density of actions can be computed based on the density of inner actions using the change of variable theorem). Second, the entropy of next states is decomposed as the sum of: the entropy of the inner policy, the expected probability of the inner actions knowing the state transitions (i.e., knowing the current state and the future state), and a constant term. Then the inner policy is maximized using SAC (applying the outer actions in the MDP). The piecewise-linear transformation is computed to maximize the expectation of the probability of inner actions knowing the state transitions. The probability of (inner) actions knowing the state transition is learned by maximum likelihood estimation. This approach eventually leads to better control policies compared to algorithms that only accounts for the entropy of actions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
Some points are unclear and have raised questions, some of which may be critical to the paper. See questions bellow.\n2. The authors have missed a large part of the literature that is active in maximizing the entropy of states visited by a policy along trajectories [1, 2, 3, 4, 5]. The latter can be performed with simpler algorithms compared to the one proposed in the paper. In practice those algorithms allow to have a good state coverage, which is the objective pursued by the authors. They should be added in the related works, discussions and experiments.\n3. There are errors (or shortcuts) in some equations and notations that make the paper hard to follow and prevent ensuring the correctness of all mathematical developments. Here are those I noticed:\n\na. In section 3, the reward function is sometimes a function of the action, sometimes not.\n\nb. In equation (2), the distribution $P^\\pi$ is undefined.\n\nc. In section 4.1, how are $p(x)$ and $\\pi$ related? (There is also a clash of notation between the constant $\\pi$ and the policy $\\pi$)\n\nd. The inverse dynamic of inner policy is not defined in the main body.\n\ne. In equation (9), I suppose an expectation is missing over the random variable $s$ in the gap term.\n\nf. In equation (10) and (12), the variable $s$ is again undefined in the optimization problem. Is it on expectation or for all $s$, how is it done in practice?\n\ng. Same problem in equation (13), where a function independent of $s$ equals a function of $s$.\n\nh. In section 5.3, is the $x$-variable in the equation the inner action $e$?\n\ni. In many equations $e$ appear as a variable, but should be replaced by $f^{-1}(a, \\theta)$ as the expectations are over $a$.\n\nj. There are thee notations for parametric functions that are used together. For example, we have $f(e, \\theta)$, $f^\\theta$ and $f_\\theta$.\n\n4. Section 3 focusses on defining conditions under which the action entropy equals the state entropy. 
The latter is done based on non-redundant actions and non-redundant policies. From my understanding, the inner policy is not non-redundant, and there are no guarantee that the (outer) policy is eventually non-redundant after optimization. While it can be argued that the discussion is in itself interesting, I think it is confusing to introduce at the very beginning of the paper something that is unused afterwards.\n5. There is a methodological error in the experiment. The entropy of the next state is never shown, there is thus no evidence that the method learns high entropy policies.\n\n[1] Lee, L., Eysenbach, B., Parisotto, E., Xing, E., Levine, S., & Salakhutdinov, R. (2019). Efficient exploration via state marginal matching. arXiv preprint arXiv:1906.05274.\n\n[2] Guo, Z. D., Azar, M. G., Saade, A., Thakoor, S., Piot, B., Pires, B. A., ... & Munos, R. (2021). Geometric entropic exploration. arXiv preprint arXiv:2101.02055.\n\n[3] Islam, R., Ahmed, Z., & Precup, D. (2019). Marginalized state distribution entropy regularization in policy optimization. arXiv preprint arXiv:1912.05128.\n\n[4] Hazan, E., Kakade, S., Singh, K., & Van Soest, A. (2019, May). Provably efficient maximum entropy exploration. In International Conference on Machine Learning (pp. 2681-2691). PMLR.\n\n[5] Liu, H., & Abbeel, P. (2021). Behavior from the void: Unsupervised active pre-training. Advances in Neural Information Processing Systems, 34, 18459-18473." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In the Related Works section, the authors reference several algorithms (such as SAC, DSPG, CBSQL, and max-min entropy frameworks). Could the authors elaborate on why these algorithms were not included in the experiment section? Understanding the selection criteria for comparison could provide further clarity on the position of MNSE within the broader landscape of state and policy entropy methods.\n\n2. SAC is used as the baseline for updating the policy entropy term in the MNSE framework. It would be interesting to learn how MNSE might perform if alternative algorithms, such as DSPG or CBSQL, were used instead. Could the authors discuss potential outcomes or the theoretical basis for choosing SAC over these other algorithms? Insights into how the baseline choice affects MNSE’s performance would be helpful for researchers considering alternative implementations of the framework.\n\n3. The entropy of all visited states has long been an interesting topic, though challenges remain in addressing it within a solid theoretical framework. Could the authors discuss the correlation between next-state entropy and the total entropy of visited states? This could provide further insight into MNSE’s implications for overall state entropy in agent behavior." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper offers a fresh perspective on state novelty by theoretically linking next-state entropy and policy entropy. While state novelty algorithms are known to enhance agent performance in various environments, the theoretical analysis of state entropy remains underexplored. 
This paper addresses this gap by establishing a detailed connection between next-state entropy and policy entropy, achieved through an internal policy and an action mapping function.\n\n2. The authors provide a rigorous and structured proof process, making it easy for readers to follow the logical progression and understand the interplay between the entropies. This systematic approach gives a solid foundation for the proposed MNSE framework, and the clarity of the theoretical contributions makes the complex subject matter more approachable for readers.\n\n3. The MNSE algorithm is an impressive practical outcome of this research, showcasing strong empirical results in environments with redundant action spaces. This suggests that the algorithm could be beneficial for a wide variety of applications where action redundancy exists, offering new avenues for exploration in reinforcement learning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a thorough theoretical analysis of the relationship between maximizing next-state entropy and policy entropy. The authors propose a novel framework that links these two types of entropy through an innovative approach that utilizes an inner policy and an action mapping function. Based on this theoretical foundation, the authors introduce the Next-State Entropy Maximization algorithm (MNSE), which is shown to be particularly effective in environments with redundant action spaces. This work contributes valuable insights into entropy maximization, bridging next-state novelty concepts with policy design in reinforcement learning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper includes performance comparisons at 20% and 40% EAP, as seen in Experiment 2. However, expanding these comparisons to include higher EAP levels, such as 80%, 60%, and 100%, would be beneficial. 
Analyzing performance across a broader range of EAP settings could offer a more comprehensive view of the algorithm’s robustness and adaptability to different entropy thresholds.\n\n2. The current experiments effectively demonstrate MNSE’s performance in Mujoco and Meta-World environments. However, adding further experiments focused on pure exploration tasks—such as those found in maze environments or other exploration-heavy scenarios—would be valuable. Such experiments could provide deeper insights into how MNSE's maximization of next-state entropy impacts exploration behavior, highlighting its effectiveness in environments where exploration quality is critical.\n\n3. While MNSE is compared with well-established state novelty and exploration algorithms, such as MinRed and NovelID (both published in 2021), comparisons with more recent approaches (from 2022 or 2023) could further strengthen the relevance and appeal of this work. Including newer algorithms in the comparative analysis could provide a more current context for MNSE’s performance and underscore its competitiveness among recent advancements in state novelty and exploration research." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Based on the theory, when effective action proportion is 100%, the MNSE should have the same performance as SAC (the base model adopted by MNSE). But in Fig 4. 
there are some differences, any explanation or analysis on this observation?\n\n2. For eq (4), how can we guarantee the mu variable is positive related with the entropy? The reviewer did not see any further analysis on this part (not even in the appendix).\n\n3. All experiments are conducted in the continuous action space, will maximum next-state entropy benefit the policy learning in discrete action space environments?\n\n4. Can you explain in more details on how to derive the content of equation (8)?\n\n5. To minimize the gap term in Theorem 5.1, step 1 and 2 are provided in equation (11) and (12). Why the parameters of the inverse dynamic of inner policy are optimized first, instead of the ones of the mapping layer?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well-written and clear, with a solid theoretical analysis and a sufficient set of experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper theoretically highlights the distinction between policy entropy and next-state entropy in Markov Decision Processes (MDPs) and makes a compelling argument that the two entropies are equivalent if the policy is non-redundant---meaning that different actions lead to different next states given the same state in the MDP. The paper then shifts its focus to demonstrating the advantages of incorporating maximum next-state entropy into the reinforcement learning process in MDPs. This is done by deliberately introducing saturation and deadzone effects into the control system to create redundant policies. Numerous experiments demonstrate their method can outperform the baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
In the introduction, the authors present equipment aging as an example of redundancy in the action space. However, this example does not fully convince the reviewer. Specifically, reinforcement learning assumes that the Markov Decision Process (MDP) remains consistent. When changes occur in the action space, such as those caused by aging equipment, the previously learned policy may no longer perform effectively within the altered MDP. Further clarification or a more suitable example might strengthen this argument.\n\n2. There is an abuse of notation for reward function r(s,a) while the paper assumes the reward is only affected by states." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We aim to maximize the next-state entropy by constructing an action-mapping layer and maximizing the policy entropy of the inner policy." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024maximum,\ntitle={Maximum Next-State Entropy for Efficient Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0G6rRLYcxm},\nnote={under review}\n}" }, "abstract": { "value": "Maximum entropy algorithms have demonstrated significant progress in Reinforcement Learning~(RL), which offers an additional guidance in the form of entropy, particularly beneficial in tasks with sparse rewards. Nevertheless, current approaches grounded in policy entropy encourage the agent to explore diverse actions, yet they do not directly help agent explore diverse states. In this study, we theoretically reveal the challenge for optimizing the next-state entropy of agent. To address this limitation, we introduce Maximum Next-State Entropy (MNSE), a novel method which maximizes next-state entropy through an action mapping layer following the inner policy. 
We provide a theoretical analysis demonstrating that MNSE can maximize next-state entropy by optimizing the action entropy of the inner policy. We conduct extensive experiments on various continuous control tasks and show that MNSE can significantly improve the exploration capability of RL algorithms." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Deep Reinforcement Learning; Maximum Entropy Reinforcement Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4c78973f7f9db5d7149df9b1fd3796be00f6c424.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/14bf76fba1cd2158017920372e5246d6721d7886.zip" }, "title": { "value": "Maximum Next-State Entropy for Efficient Reinforcement Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0GC81gpjOo
Cognitive Insights and Stable Coalition Matching for Fostering Multi-Agent Cooperation
main
Active
Multi-Agent Cooperation;LLM;Theory of Mind
other topics in machine learning (i.e., none of the above)
3;3;6;6
4;4;3;2
2;2;3;3
2;3;3;3
2;2;3;3
4.5
3.25
2.5
2.75
2.5
-0.904534
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- For the insight that low ToM exhibits better cooperation compared to high ToM, I wonder how specific this is to the environment being looked at. For example, the multi-agent programming setting, at least to me, does not strike me as an environment that requires much ToM to successfully cooperate in, therefore low ToM being more successful may simply be due to the lower complexity of using it. Have the authors noticed this same trend in other environments?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The related work is clear and concise.\n- The paper is well motivated\n- The empirical results for HumanEval demonstrating the effectiveness of the matching mechanism to match agents to those that they are able to accurately predict beliefs about is promising. This is alongside promising improvements in terms of Pass@1 rates.\n- The empirical results are similarly promising in terms of problem solving and general reasoning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper focuses on the problem of cooperation in multi-agent systems when the agents are LLM agents. 
In particular, this work focuses on how theory of mind interacts with cooperation and introduce a mechanism for designing diverse ToM groups amongst these agents that optimise the overall cooperative performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Whilst the authors do mention that the coalition formation is generally an NP-hard problem, they do not offer any ideas about potential future possibilities that would help with the scalability of the framework\n- I do not understand the prompt referenced in Appendix A and the corresponding LLM output. The belief model is rather vague, and when looking at the output of the alignment scores it seems a bit arbitrary - e.g. the belief model does not mention using an object oriented approach, but in the alignment score this seems to be highly valued? I am just slightly concerned that some of the alignment scores outputted by the LLMs are not particularly strong signals and ideally it would be measured using something more robust.\n- Overall, my main concern is the potential scalability of the proposed framework, with firstly the coalition forming being difficult and secondly the requirement to generate beliefs over all other agents. Furthermore, whilst the empirical results are good and I am not downplaying them, I am not convinced the proposed settings are those that can really leverage ToM fully. However, this is not impacting my score." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1)\tFor a non-LLM environment, how will the matching scores be calculated?\n2)\tIn the debate environment the baselines where both affirmative and negative lead to a bias of the affirmative winning 65.45% of the time, that is they are both using the same method. This is a cause for concern that this result may not be robust enough and might simply be taking advantage of this bias, is it possible to show the results the other way around? (With your model placed in the negative.)" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is easy to follow, with the appendix clearly aiding in understanding how the models function.\n\nThere is a clear logic as to why each component is added, this is shown through the experimentation and the results. Especially the need for adding coalition matching on top of the theory of mind.\n\nThere is a clear increase in Pass@1 the iterative programming environment with this model.\n\nThere is a clear increase in accuracy in the logic and reasoning problems compared to existing methods." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present a method for using using Theory of Mind (ToM) and a coalition matching algorithm to allow LLM agents (using various LLM models) to cooperatively perform tasks in environments such as:\n\n-Iterative Programming (HumanEval, MBPP)\n\n-Debate (Two cooperative teams compete, with affirmative team taking on various forms of the model (no-ToM, ToM without matching, ToM with matchin), and negative team takes the baseline no-ToM)\n\n-Logical and General Reasoning (Using AQUA-RAT and MMLU datasets)\n\nThe k-level ToM is set to take in an observation, the action of all agents at the previous timestep and the belief of the actions of all agents at the previous timestep, at the 0-level this is set to start with no belief. These are open-ended action spaces defined by natural language, and the observations, actions and beliefs are textual outputs. (The prompts of these are demonstrated in the appendix)\n\nThe Matching coalition algorithm takes a set of LLM agents and the possible matchings of these agents. It then assigns a preference order of these matchings. It aims to create stable matchings based on this preference order, such that agent i prefers agent j over all other pairings and agent j prefers agent i and neither agent has incentive to deviate from this pairing. A specific rule for this preference order is define based on the alignment, based on semantic similarity as calculated by the LLM, between beliefs of actions and the actual actions, and the agents are only matched if this is above a certain threshold.\n\nThe results show that without matching lower ToM levels have higher cooperative trends, while with matching higher ToM levels have better cooperative trends. In all shown environments the ToM w. Matching (their method) outperforms the baselines of no-ToM, or ToM w.o. matching." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The calculation of the semantic similarity of beliefs and actions is left to the LLM, this does not lend itself to a general approach as the title alludes to. It is made clear throughout the paper that this is applied to LLMs however and I do not see this as a big weakness, but would like to see this made clear in the title if possible.\n\nIn the debate environment the baselines where both affirmative and negative lead to a bias of the affirmative winning 65.45% of the time, that is they are both using the same method. This is a cause for concern that this result may not be robust enough and might simply be taking advantage of this bias, is it possible to show the results the other way around? (With your model placed in the negative.)\n\nMinor: The acronym FTM (Fraction of trust members/Frequency of team matching) is used multiple times making some sections difficult to understand." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "How reliable are the alignment measurements provided by LLMs? \n\nHow are the specialized ability scores used in evaluations, and what is their impact?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The introduction and related work section motivate the research pretty well. \n\nThe idea of guiding multi-agent collaboration through ToM and belief alignment is novel. \n\nThe authors conduct comprehensive evaluations across diverse task scenarios and base LLMs, presenting both quantitative and qualitative results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work examines the influence of different levels of Theory of Mind capabilities on collaboration among multiple Large Language Model (LLM) agents. The authors propose an algorithm to enhance LLM agents’ teamwork performance by matching partners with high belief alignment. While the idea of guiding multi-agent collaboration through ToM and belief alignment is novel, this paper presents the proposed method in a less comprehensive manner, missing many important details. Researchers may encounter difficulties when applying the proposed algorithm in specific scenarios. Additionally, the claimed conclusions do not align well with the empirical results and therefore need further clarification." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The ToM formulation presented in Section 4.1 deviates from the common definition of higher-order ToM. When conducting recursive ToM inferences at level-k, agents are only given their own belief at level-(k-1) rather than the beliefs of other agents. I recommend that the authors refer to [1] for its definition of higher-order mental state inference.\n\nThe proposed alignment measurement in Section 4.2 may not apply to general high-order ToM inferences in multi-agent systems. 
For example, “what I think A is thinking about B’s action” and “what I think C is thinking about B’s action” are different 1-level ToM inferences that result in the same alignment measurement as defined in this paper. The authors might want to explicitly define the format of beliefs to clarify the formulation.\n\nThe multi-agent setup for each evaluation scenario is not clearly described. It is unclear how many agents are involved, what their action and observation spaces are, and how they collaborate. For instance, the interactive programming scenario appears to be a centralized system with full observation, as the PM is the only agent making decisions and ToM inferences. Then the value of ToM is less salient in such a single-agent system.\n\nThe two evaluation metrics are the optimization objectives of the proposed algorithm rather than direct measurements of LLM agents’ collaboration performance or “cooperation trends.” The claim that “agents with higher ToM capabilities may not necessarily exhibit better cooperative trends” conflicts with the results shown in Tables 3 and 4, where agents with ToM perform better. I recommend using other metrics, such as task completion rate or efficiency, to provide consistent conclusions and increase criterion validity.\n\nThe proposed algorithm is vague and highly dependent on specific prompt design when generalizing to different task scenarios. For instance, what happens when an agent is assigned to cooperate with a given partner (line 8 of Algorithm 1) is not clearly defined for each scenario. This ambiguity could lead to potential bias in evaluations. In the debate case study (i.e., lines 495-497), the ToM with matching condition has two LLM agents forming arguments, while the other conditions only involve one. The performance advantage might be due to the increased number of agents via self-reflection, rather than the proposed matching algorithm.\n\n\n[1] Ying, L., Jha, K., Aarya, S., Tenenbaum, J. 
B., Torralba, A., & Shu, T. (2024). GOMA: Proactive Embodied Cooperative Communication via Goal-Oriented Mental Alignment. arXiv preprint arXiv:2403.11075." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see the Weaknesses section for my questions. I also welcome the authors to correct any misunderstandings I may have about their paper." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The questions underlying this paper are certainly interesting. While much work has focused on the improved cooperation using ToM, few (to my knowledge) have investigated how this might hinder cooperation, at least not (again, to my knowledge) in the context of LLM agents. More generally, the issue of intelligent coalition formation in this context is an interesting problem and one that I believe will have increasing real-world relevance in the coming years. The idea of alignment of beliefs in order to solve this problem is natural and original (again, that is to the best of my knowledge, though I also would not be at all surprised if a version of this had been studied before in the game theory or multi-agent systems literatures, outside the context of LLMs). 
I also appreciated the effort the authors put in to studying a relatively wide variety of tasks, models, and frameworks for multi-LLM problem-solving. The presentation of their results was largely clear." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the concept of theory of mind (ToM) in the context of one LLM agent forming and managing coalitions of other LLM agents. They show that prompting the first LLM agent to engage in 2-level reasoning about the others' beliefs can actually _hinder_ performance compared to 1-level reasoning (cf. the general concept of k-level reasoning). They introduce a method of comparing and matching agents based on their ability to predict each other's actions. Their experiments study how the use of that metric in forming coalitions of LLM agents impacts the agents' ability to solve problems in the domains of programming, logic, and general reasoning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Unfortunately, despite its positive aspects, I think the paper does have some significant issues. In what follows I have attempted to cluster these and order them approximately by importance.\n\n**Matching Algorithm Confusions**\n\nThe matching algorithm seems underspecified in several places, and it is not always clear what the authors are actually doing. Moreover, I didn't feel that the underlying theoretical principles were always appropriate. More concretely:\n- The authors start by talking about the set of an agent $i$'s partners $\\mu(i)$, but based on their matching algorithm they seem to implicitly assume that $\\mu(i)$ is always a singleton (see Equation 2). Otherwise, matchings are stable only when player $i$ would not prefer to be partnered with $j$ over its current _set_ of partners $\\mu(i)$. But what would that mean? Are preferences between sets of agents instead of single agents? 
If so, the relevant comparison would presumably be that agent player $i$ would not prefer to be partnered with any _set_ of other agents over its current _set_ of partners $\\mu(i)$.\n- In line 9 of algorithm 1, what happens after agents signal a desire to re-match? What if the belief misalignment measure is greater than the tolerance $\\epsilon$ for all other agents? Does the agent end up in a singleton coalition? The authors state that the iterative process of coalition formation ends in a stable matching but they do not actually prove this. Especially with the introduction of preferences based on differing skills (see the next point), I actually suspect that it would be trivial to create a cyclic matching problem.\n- The preference order described in equation 4 are based on agents having different skill levels $\\alpha_i$ on different tasks, but where do these skill levels come from? More importantly, if the point is to match agents with complementary skills, why does the matching algorithm only compare agents' skills on a _single_ task?\n- Minor: the authors say on line 244 that the alignment between beliefs and actions is not mathematical subtraction, despite them denoting it that way. I would strongly suggest not denoting it using subtraction to begin with and being more explicit about what the distance measure here actually is.\n\n**Strength of Motivating Claim**\n\nThe authors' motivating claim is that lower-level ToM abilities may improve the ability of agents to cooperate beyond higher-level ToM abilities. Their justification for this is a setting where one agent -- a \"Project Manager\" (PM) -- is instructed to use either 1-level or 2-level reasoning to organise several other agents (all of which are instructed use 0-level reasoning). But this essentially means that in the latter case the PM is being instructed to reason about the agents acting in a way that they do not in fact act. 
Explained this way, it is still somewhat interesting but by no means surprising that the PM is less successful when prompted to reason using higher-level ToM. Essentially, $k$-level reasoners are designed to best respond to $(k-1)$-level reasoners, not $(k-2)$-level reasoners.\n- Relatedly, looking through the actual LLM outputs included in the appendices, the level-2 ToM responses seem quite strange. They are worded as if they are predicated on the other agents _actually observing_ actions in advance, rather than _anticipating_ instructions. I am not really sure what is going on here, but reading through it was not at all surprising that the higher ToM agents performed less well on the task, as they appeared to be being mis-instructed.\n- As a final sub-point on this topic, I suggest that the authors also benchmark against a 0-level PM and against settings where the agents are 1-level or 2-level reasoners, at least for their motivating experiments described in Table 1.\n\n**Missing Experimental Details**\n\nThere are several (relatively minor) aspects missing from the discussion and presentation of the experiments that, if present, would improve the paper.\n\n- There are no error bars or reports of standard errors for the experimental results, making it difficult to interpret their statistical significance.\n- I assume the ToM level for debating agents arguing for the negative side is 0, but it would be good to clarify this.\n- Once coalitions are formed, how do the prompts/instructions given to the agents in different coalitions actually change?\n- How many agents are actually present in the various settings, and what are the sizes of the coalitions that are formed?\n\n**Game-Theoretic Reasoning and Precision of Claims**\n\nThis is a relatively minor, but a few times I found myself slightly frustrated by the authors claims, which I believe did not fully take into account the relevant game-theoretic concepts (see also the confusing use of what appears to be a 
binary matching algorithm for n-player coalition formation, described further above). \n\n- A key example is the authors' claim that the so-called \"Fraction of Trust Members (FTM)\" is a good measure of what they term the \"cooperative trend\" (N.B. to be grammatical, this should probably be \"Trusted\" not \"Trust\", though it is not actually clear what the relevance of the concept of \"trust\" even is here). But belief alignment by itself does not imply higher levels of cooperation. I may have perfectly accurate beliefs about what you are going to do in a two-player zero-sum game (where cooperation is definitionally impossible). Thus, it is clearly not true that in general \"a higher FTM value [indicates] a more cooperative agent\", as claimed in line 410/411. \n- Relatedly, the authors talk about ToM improving cooperation but is more about ToM improving the facilitation/management skills of a single PM agent. This is also a very interesting and valid topic of study, but I suggest the authors change the phrasing slightly throughout the paper to better reflect the rather narrow form of cooperation problem they consider. Indeed, my understanding is that the authors largely focus on the case where only one agent (a PM) is imbued with ToM." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024cognitive,\ntitle={Cognitive Insights and Stable Coalition Matching for Fostering Multi-Agent Cooperation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0GC81gpjOo},\nnote={under review}\n}" }, "abstract": { "value": "Cognitive abilities, such as Theory of Mind (ToM), play a vital role in facilitating cooperation in human social interactions. 
However, Large Language Model (LLM) agents with higher ToM abilities do not necessarily exhibit better cooperative behavior compared to those with lower ToM abilities, highlighting the complexity of translating human cognitive processes to artificial intelligent agents. To address this challenge, we propose a novel matching coalition mechanism that leverages the strengths of agents with different ToM levels by explicitly considering belief alignment and specialized abilities when forming coalitions. Our proposed matching algorithm seeks to find stable coalitions that maximize the potential for cooperative behavior and ensure long-term viability. By incorporating cognitive insights into the design of multi-agent systems, our work demonstrates the potential of leveraging ToM to create more sophisticated and human-like coordination strategies that foster cooperation and improve overall system performance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Multi-Agent Cooperation", "LLM", "Theory of Mind" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6cc6074045cfc176c21712535396f06d8691f1f6.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/e5ad83a97121bca8f9eb045fde2d27c91341e879.zip" }, "title": { "value": "Cognitive Insights and Stable Coalition Matching for Fostering Multi-Agent Cooperation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0GzqVqCKns
Probing the Latent Hierarchical Structure of Data via Diffusion Models
main
Active
data structure;hierarchical compositionality;diffusion models;statistical physics;phase transition
other topics in machine learning (i.e., none of the above)
5;5;6;6
3;2;3;3
2;3;3;3
2;3;3;3
2;2;3;3
5.5
2.75
2.75
2.75
2.5
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "(1) The paper introduces novel approaches for analyzing the structure of inputs using pretrained diffusion and language models.\n\n(2) The authors offer a thorough analysis and derivation, with experimental results closely aligning with theoretical expectations.\n\n(3) Multiple schematic diagrams and data visualizations are included, providing valuable insights into the methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper examines the hierarchical correlation structures among input tokens using a dynamic correlation function and dynamical susceptibility within a forward-backward experimental framework. These variables reveal how two input tokens respond to perturbations when attempting to recover data from noisy inputs. Analyzing diffusion and language models, the study demonstrates an anticipated correlation aligned with spatial structures." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) The paper’s presentation could be improved. 
While there are numerous figures to aid understanding, the main text is somewhat challenging to follow.\n\n(2) Why is the σ in Equation 3 binary? Wouldn’t a continuous measurement be more appropriate? For instance, a small difference in pixel values might not alter the semantic structure of the images, but it would be captured by binary measurement.\n\n(3) Shouldn’t the spatial correlation structures be content-dependent? For example, if the bird and the laptop in Figure 5 were moved slightly farther from the camera, would this change affect the result shown in Figure 2?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses section." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The hierarchical perspective provides novel insights into the diffusion model's mechanism and the application of physics is also refreshing. I feel that the community can benefit from these insights, which may give rise to empirical advancements.\n2. The paper is well written and clearly communicates the main ideas.\n3. The experiments on natural data (image/text) support the theoretical claims." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to understand diffusion models through a hierarchical latent variable model. Through this framework, this paper demonstrates the connection between the noise level and the hierarchical levels, as evidenced by a transition phrase. This paper builds on the tools from physics and illustrate their theoretical model with empirical results on practical models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. It'd be great to see attempts at utilizing the theoretical/empirical observations to advance practical model design. Some discussions along this direction would also be appreciated.\n2. The tree model seems overly simplified for real-world data like images and languages. For example, one would imagine two high-level variables could become co-parents for some low-level variables, thus breaking the tree structure. I would appreciate a discussion on this limitation and the applicability of the theoretical framework to more general latent models." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. 
From the analysis, BP denoising appears to be a one-step method that directly samples $\\hat{x_0}$ from the noisy observation $x_t$, differing from typical diffusion denoising that iteratively samples $x_{t-1}$ from $x_t$ throughout the process. Does this discrepancy exist, or are the authors also using a denoising schedule similar to real diffusion models?\n\n2. Can we interpret the maximal correlated length achieved at an intermediate noise level (time step) as the model generating class information or lower-level features? If so, this would contrast with existing observations that diffusion processes follow a coarse-to-fine generation pattern (e.g., https://arxiv.org/abs/2303.02490), where lower-level features are generated at the beginning, not in the middle.\n\n3. Figure 4(a) is somewhat unclear. Combined with (c), it seems the authors are suggesting that the largest correlated changing chunk appears at a masking fraction $t/T \\in [0.5, 0.7]$. However, this is not immediately evident from (a) alone." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors aim to capture hidden hierarchical structures within discrete data using the RHM model, with their RHM+BP framework supporting both discrete and continuous diffusion processes.\n\n2. By applying BP for denoising, the authors rigorously analyze phase transitions in the denoising results and identify the critical noise level needed to induce a change in the data class (or low-level feature)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors examine the hierarchical structure in high-dimensional data by conducting forward-backward experiments within diffusion-based models. 
They employ a Random Hierarchy Model (RHM) for the data where the tokens of data are generated from a tree structure of latent, they also use Belief Propagation (BP) as the score function to denoise samples.\n\nThe authors focus on the phase transition of the average belief $p_L$ of the RHM's root node by analyzing an iterative mapping (Equation 7) and identifying a critical noise level $\\epsilon^*$ at which the transition occurs. Based on that, they also compute the minimum layers $\\tilde {l}$ needed for the transition, beyond which $p_l$ would collapse to trivial fixed points $\\{1/v,1\\}$, indicating either a complete reconstruction or randomization of upper latent variables. At this specific noise level, BP can modify the deepest latent layer $\\tilde {l}$, yielding the maximum correlation length (i.e. big \"chunks\" of data token), which is the distance over which token changes remain correlated.\n\nTo characterize this effect, the authors introduce **dynamical susceptibility** which exhibits a first increase then decrease curve as expected. They further demonstrate that the dynamical susceptibility curve has the same trend for forward-backward experiments with diffusion models and synthetic RHM experiments." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper is somewhat disorganized and hard to follow, as definitions, derivations, and experimental results are heavily interwoven. To improve clarity, consider using theorems or structured definitions to better organize the content (e.g. by moving some derivations, such as Equations 8 and 9 to appendix and summarizing them as a main theorem).\n\n2. In practice, people use real data + score-based denoising; however, the authors use RHM data + BP denoising instead. This discrepancy is insufficiently justified, making the claim that real-world data shares the same hierarchy as RHM unconvincing. 
While the authors show a similar phase transition phenomenon between the RHM case and real-world diffusion case, they do not rigorously establish a connection between them. Verification by testing real-world diffusion on RHM data may strengthen this claim.\n\n3. The results are somewhat vague and lack practical insights, as it appears that neither the RHM setup nor the forward-backward experiment has direct practical applications. Although the authors mention interpreting the \"chunks\" that emerge during the forward-backward experiments, they do not provide further discussion or related work on that." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "L57: Does such a divergence definitely indicate a hierarchical structure or are there other ways/reasons divergence could occur? i.e. is this divergence a “proof” of (or very strong evidence for) hierarchy?\n\nL210: Clarification: so, the epsilon-process is itself a mean-field approximation of the discrete diffusion process, but then you use another mean-field on top of that to compute the correlation?\n\nL356: Can you elaborate on why a susceptibility peak is a ‘smoking gun’ for hierarchy? Just because one nonhierarchical example doesn’t have a susceptibility peak doesn’t mean there might not be others that do?\n\nL511: How (or does) this relate to the diffusion-as-spectral-autoregression point of view? 
Also there is a typo, 'trough'.\n\nGeneral: \n\nDoes the susceptibility divergence tell us anything about how many levels of hierarchy are likely present? Or just that there is at least one level?\n\nThe RHM is discrete, and discrete vs continuous diffusion are rather different; can you justify why RHM should be a good model for continuous data/diffusion as well?\n\nDo the MDLM and ImageNet expts actually confirm that a phase transition occurs? Or do we just observe the susceptibility peak and infer a phase transition by analogy to RHM? In particular, it seems that for ImageNet it might actually be possible to run a classifier to determine whether the class changed." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is clearly written and the RHM model is nicely studied. The text and image experiments are well-designed (although I still have some questions, below, about how well the conclusions from RHM transfer to real data). I appreciate the application of ideas from Physics to ML problems." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper suggests that forward-backward diffusion experiments can be helpful in uncovering hierachical structure in data. They first study a synthetic Random Hierarchical Model and show that a peak of the dynamical susceptibility (related to correlations between blocks of tokens) occurs at a noise level where a phase transition is known to occur in the RHM (i.e. the latent class at the root changes). They then show peaks in the susceptibility in text and image experiments." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Please see Questions." 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "A hierarchical structure in the data induces a diverging correlation length at a phase transition in diffusion models, which is observed also in text and images." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024probing,\ntitle={Probing the Latent Hierarchical Structure of Data via Diffusion Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0GzqVqCKns},\nnote={under review}\n}" }, "abstract": { "value": "High-dimensional data must be highly structured to be learnable. Although the compositional and hierarchical nature of data is often put forward to explain learnability, quantitative measurements establishing these properties are scarce. Likewise, accessing the latent variables underlying such a data structure remains a challenge. Forward-backward experiments in diffusion-based models, where a datum is noised and then denoised, are a promising tool to achieve these goals. We predict in simple hierarchical models that, in this process, changes in data occur by correlated chunks, with a length scale that diverges at a noise level where a phase transition is known to take place. Remarkably, we confirm this prediction in both text and image datasets using state-of-the-art diffusion models. Our results suggest that forward-backward experiments are informative on the nature of latent variables, and that the effect of changing deeper ones is revealed near the transition." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "data structure", "hierarchical compositionality", "diffusion models", "statistical physics", "phase transition" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/26e314e2c1580aed2ba936b954b47b9c9928e08b.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Probing the Latent Hierarchical Structure of Data via Diffusion Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0HWAbWgI3T
A Geometric Approach to Personalized Recommendation with Set-Theoretic Constraints Using Box Embeddings
main
Active
Box Embeddings;Personalized Query;Set-based embeddings;Recommendation
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;5;5
3;4;3
2;3;2
2;2;2
1;2;2
4.333333
3.333333
2.333333
2
1.666667
0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "The paper introduces the notation of query in the context of recommendation systems. What is the difference between query in the context of this paper and query in the context of search engine like Google?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper studies an interesting yet not well explored problem: personalized item recommendation with set-theoretic queries. The set nature of queries, which consists of set relationships such as negotiation and intersection, leads to an interesting research question: how to capture such relationships to make more accurate recommendations.\n\n2. The employment of box embeddings to represent users, items, and attributes is intuitive and sensible to capture set relationships between these three.\n\n3. Experimental results on four real-world datasets demonstrate good performance of the proposed box embedding method, outperforming vector embedding approaches.\n\n4. The paper is well-structured and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes using box embeddings for matrix completion to improve personalized item recommendation with set-theoretic queries. 
Box embeddings are employed to bypass the limitation of commonly used vector embeddings, which might fail to recommend items for set-theoretic queries consisting of negotiation and intersection relationships. By representing users, items. and attributes as box embeddings, i.e., hyper rectangles in d-dimensional space, the proposed approach can jointly factorize user-item interaction matrix and item-attribute matrix. Then, users and attributes are regarded as boxes containing multiple items. As such, given a query containing set relationships between attributes, the model retrieves top items having the largest box volume shared with those of users as recommendation list. The whole model is trained to capture containing relationships, i.e., user and attribute boxes contain multiple item boxes. Experimental results on four datasets demonstrate the strong performance of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Despite showing promising recommendation results, the proposed method seems to be a direct application of box embeddings for personalized item recommendation with set theoretic queries, which is somehow a limited contribution.\n\n2. The paper relies heavily on the theory of box embeddings but some key concepts were not described sufficiently. For example, in line 191, how to guarantee $x^\\llcorner < x^\\urcorner$ for all dimensions $d$; what is the definition of $VolIntGB$ in Equation 3?; what are the parameters of the model to optimize?\n\n3. The baselines are somewhat limited. Although authors already mentioned in Section 2.1., representative baselines such as LightGCN [1] and MultiVAE [2] were not considered. Including more recent and advanced baselines will further ascertain the strength of the proposed method.\n\n[1] He et al. Lightgcn: Simplifying and powering graph convolution network for recommendation. SIGIR 2020.\n[2] Liang et al. 
Variational autoencoders for collaborative filtering. WWW 2018.\n\n4. Lack of efficiency analysis. What is the advantage of using box embeddings over vector embedding w.r.t. running time?\n\n5. Missing descriptions of some important experimental settings, e.g., how many negative sampled required to train Equations in lines 233 and 240. Moreover, ablative analysis of key hyper-parameters is also not presented. For instance, how the number of negative samples affect the model accuracy? The same questions for $w$ in line 240." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "### Questions\n - (Q1) The authors used nDCG@K for model selection, but evaluated the final performance based on HR@K. Why did the authors use inconsistent metrics for validation and testing? In my opinion, HR@K with 100 negative items is a rather insensitive measure for ranking evaluation. I would like to recommend using Recall@K without negative sampling.\n - (Q2) In the current manuscript, the authors do not mention/discuss the result on MovieLens-20M (Table 7 in Appendix B.3). Also, Table 7 is not self-contained. What is the definition of VEC-*? 
(probably MF or NeuMF?)\n \n### Comments.\n - (C1) To my understanding, \"attribute-specific query recommendation\" is the task where (1) item attribute values are partially observed and often missing, and (2) in the prediction phase, the positive items are conditioned not only on a user but also on a boolean query. The problem of (1) has been addressed in existing studies (e.g., [b,c]). I am not familiar with (2) in the context of item recommendation, but there might be existing research on it. A discussion/comparison of existing studies on these points would make it easier to understand the novelty of this work.\n - (C2) In line 368, the authors report that they followed the standard sample scoring procedure described in Rendle et al. (2020). However, to my understanding, using this sampling technique is not recommended for a dataset with a small item catalog such as Last-FM, MovieLens-1M, NYC-R. It may just undermine the reliability of the reported results to reduce a small experimental cost.\n\n\n## References\n\n [b] Wu, Le, et al. \"Joint item recommendation and attribute inference: An adaptive graph convolutional network approach.\" Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval. 2020.\n\n [c] Xian, Yikun, et al. \"Ex3: Explainable attribute-aware item-set recommendations.\" Proceedings of the 15th ACM Conference on Recommender Systems. 2021." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors tried to use the latest technique in the NLP field (i.e., box embedding) for a recommendation-related task. \n2. The authors have carefully designed an evaluation protocol for attribute-specific query recommendation based on traditional collaborative filtering." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors apply box embedding to attribute-specific query recommendation.\nThey formulated the task of attribute-specific query recommendation and proposed a recommendation method based on box embedding for the task.\nThe authors also tried establishing an evaluation protocol for this new task and provided detailed analyses based on generalization spectrum gap and compound error.\nOn the other hand, the current manuscript severely lacks a discussion of existing recommendation fields (e.g., context-aware recommendation), and the technical novelty is unclear." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The scope of this study is on a rather limited application, which the authors called \"attribute-specific query recommendation\".\n2. The current manuscript lacks related work on \"attribute-specific query recommendation\". In addition, the authors should discuss the relationship between this study and context-aware recommender systems [a].\n3. Some experimental settings are not convincing. See the following questions/comments for details.\n\n### References\n[a] Adomavicius, Gediminas, and Alexander Tuzhilin. \"Context-aware recommender systems.\" Handbook of Recommender Systems. Boston, MA: Springer US, 2010. 217-253." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "It is suggested that an efficiency comparison between the traditional vector embedding methods and the proposed box embedding method be discussed." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1.\tThe study models attribute-specific query recommendation as \"set-theoretic matrix completion,\" treating attributes and users as item sets.\n2.\tThe paper effectively demonstrates the limitations of existing vector embedding models for this specific task.\n3.\tExperimental results validate the effectiveness of the proposed model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work addresses the task of personalized recommendation using set-theoretic queries. The authors frame this problem as \"set-theoretic matrix completion,\" highlighting that traditional approaches, such as logistic matrix factorization, do not align with the set-theoretic operations needed during inference." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThe approach presented in this paper conflicts with the widely used matrix factorization model, which effectively leverages collaborative filtering signals between users and items. It is unclear how the proposed model addresses these signals.\n2.\tThe experimental baselines are not state-of-the-art; comparing the proposed method to more advanced recommendation models would better demonstrate its advantages.\n3.\tSome equations are difficult to follow due to unclear notation explanations." 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "Set-theoretic embeddings offer the appropriate inductive bias needed to effectively answer queries with set constraints." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024a,\ntitle={A Geometric Approach to Personalized Recommendation with Set-Theoretic Constraints Using Box Embeddings},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0HWAbWgI3T},\nnote={under review}\n}" }, "abstract": { "value": "Personalized item recommendation typically suffers from data sparsity, which is most often addressed by learning vector representations of users and items via low-rank matrix factorization. While this effectively densifies the matrix by assuming users and movies can be represented by linearly dependent latent features, it does not capture more complicated interactions. For example, vector representations struggle with set-theoretic relationships, such as negation and intersection, e.g. recommending a movie that is “comedy and action, but not romance”. In this work, we formulate the problem of personalized item recommendation as matrix completion where rows are set-theoretically dependent. To capture this set-theoretic dependence we represent each user and attribute by a hyperrectangle or box (i.e. a Cartesian product of intervals). Box embeddings can intuitively be understood as trainable Venn diagrams, and thus not only inherently represent similarity (via the Jaccard index), but also naturally and faithfully support arbitrary set-theoretic relationships. Queries involving set-theoretic constraints can be efficiently computed directly on the embedding space by performing geometric operations on the representations. We empirically demonstrate the superiority of box embeddings over vector-based neural methods on both simple and complex item recommendation queries by up to 30% overall." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Box Embeddings", "Personalized Query", "Set-based embeddings", "Recommendation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/0665cfd25e97b1b5041673c86aa04f7dc1e39edc.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/07ef1d7579792170a5f9fb07ab9682f82c54f720.zip" }, "title": { "value": "A Geometric Approach to Personalized Recommendation with Set-Theoretic Constraints Using Box Embeddings" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0HqPwbN1Su
MLGLP: Multi-Scale Line-Graph Link Prediction based on Graph Neural Networks
main
Active
link prediction;graph neural network;multi-scale graph;line graph;complex network.
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;3;6
5;3;3
2;2;3
2;2;3
2;2;2
4
3.666667
2.333333
2.333333
2
-0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What explanation can the authors provide for the discrepancy in AP values between GAE and GCN on the Cora dataset?\n2. Could the authors offer a more detailed analysis of the time complexity of their method compared to other subgraph-based approaches?\n3. Can the authors conduct ablation studies to assess the individual contributions of the multi-scale and line graph components to the overall performance of the model?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The proposed method demonstrates excellent performance, significantly improving results across various datasets.\n2. The approach appears to be straightforward to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This manuscript presents Multi-Scale Line-Graph Link Prediction (MLGLP), a multi-scale link prediction method using Graph Neural Networks (GNNs). MLGLP learns graph structures and extracts edge features to address information loss and capture complex relationships. By constructing coarse-grained graphs at three scales and converting subgraphs into line graphs, it reformulates the task as node classification. 
Extensive experiments on benchmark datasets demonstrate that MLGLP outperforms state-of-the-art link prediction methods in average precision and area under the curve." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There are some notation issues; the model name is inconsistently defined throughout the paper, sometimes referred to as MLGLP, other times as MSGL or MSLGLP (as noted in the caption of Table 3), and occasionally as MSLG (in section 5.1). Additionally, the tables display varying levels of decimal precision (sometimes three decimal places, sometimes two), which should be standardized.\n2. There are concerns regarding baseline comparisons. For instance, the AP of GAE on the Cora dataset should be significantly higher than that of GCN based on the original paper, yet the authors report it being lower by ten points in their experiments, which needs to be explained to maintain credibility.\n3. The method involves sampling subgraphs, converting them to line graphs, and then performing node classification, which appears to result in high time complexity. Although the authors analyze time complexity, the discussion is not in-depth. They should compare it with the time complexity of two other subgraph-based methods and also include training time comparisons.\n4. The baselines compared in the paper seem somewhat outdated; for example, reference [1] proposes a line graph-based method for link prediction.\n5. The core innovation of this paper appears to be the application of multi-scale and line graph concepts to link prediction tasks. However, the paper lacks ablation studies on these two components, such as whether the line graph contributes to performance improvement, and it does not compare the final concatenation method. This makes it difficult to ascertain the key factors driving the model's improved performance.\n[1]Zhang Z, Sun S, Ma G, et al. 
Line graph contrastive learning for link prediction[J]. Pattern Recognition, 2023, 140: 109537." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "n/a" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See the weakness feedback" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* This paper introduces Multi-Scale Line-graph Link Prediction (MLGLP), a GNN approach that learns graph structures and features from edges, tackling information loss and multi-scale challenges.\n* The method constructs coarse-grained graphs at three scales to uncover complex data relationships and converts them into line graph representations, allowing for node embedding learning and reformulating link prediction as node classification.\n* Experimental results show significant performance improvements over heuristics, embeddings, and various GNN link prediction methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper tackles oversmoothing in Graph Neural Networks by proposing the use of coarse-grained graphs at three scales to capture complex relationships. Instead of pooling layers, the authors convert subgraphs into line-graphs and reformulate the task as node classification, enhancing the exploration of relationships. 
Applied to link prediction as a graph classification problem, the method shows superior performance over existing methods in terms of average precision and area under the curve in extensive benchmark tests." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The comparison of training loss and AUC among LGLP, SEAL, and MLGLP demonstrates improved loss for MLGLP relative to the baselines, yet it remains unclear why MLGLP performs weaker than LGLP in the early epochs. Further clarification on this aspect would enhance the analysis.\n* Figure 5 provides valuable visual insights; however, it lacks comparisons with state-of-the-art (SoTA) methods, hindering a fair assessment of MLGLP's performance. The authors should clarify whether the identified clusters correspond to meaningful patterns and provide an experimental analysis to support this.\n* There are several presentation issues that require careful proofreading. For instance, Section 6 contains a dangling \"However\" above Table 3 that should be addressed." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. There are many papers on link prediction in line graphs; can you explain what distinguishes MLGLP from them?\n2. It's necessary to use the latest methods as baselines, such as BUDDY[1], NCNC[2], and PEG[3].\n3. 
I have doubts about the visualization results; in Figure 5, the blue and red points seem to overlap, indicating that MLGLP cannot distinguish between positive and negative samples. An explanation is needed.\n\nReference:\n[1] Chamberlain BP, Shirobokov S, Rossi E, Frasca F, Markovich T, Hammerla N, Bronstein MM, Hansmire M. Graph neural networks for link prediction with subgraph sketching. arXiv preprint arXiv:2209.15486. 2022 Sep 30.\n[2] Wang X, Yang H, Zhang M. Neural common neighbor with completion for link prediction. arXiv preprint arXiv:2302.00890. 2023 Feb 2.\n[3] Wang H, Yin H, Zhang M, Li P. Equivariant and stable positional encoding for more powerful graph neural networks. arXiv preprint arXiv:2203.00199. 2022 Mar 1." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors use multi-scale subgraphs for link prediction to capture graph information at different granularities. The approach is interesting.\n2. The authors transform the link prediction problem into a node classification problem on a line graph, which better addresses the issue of link representation.\n3. The proposed method outperforms existing methods in the experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposed a link prediction method named Multi-Scale line-graph Link Prediction (MLGLP). MLGLP used three scales to capture information at a different level of granularity. The link prediction problem is defined as a node classification problem on a line graph, which facilitates a deeper understanding of relationships within the graph. Experiments conducted on several benchmark datasets validated the effectiveness of MLGLP." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
This paper appears to be an unfinished draft, containing many textual errors (e.g., the beginning of line 349 lacks capitalization, and line 372 is missing a period) and missing sentences (e.g., line 510).\n2. The baselines chosen in this paper are outdated.\n3. The paper lacks novelty, as the proposed module is superficial and easy to conceive. The proposed method for converting to line graph is very similar to LGLP." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024mlglp,\ntitle={{MLGLP}: Multi-Scale Line-Graph Link Prediction based on Graph Neural Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0HqPwbN1Su},\nnote={under review}\n}" }, "abstract": { "value": "This manuscript proposes a multi-scale link prediction approach based on Graph Neural Networks (GNNs). The proposed method - Multi-Scale Line-Graph Link Prediction (MLGLP) - learns the graph structure and extracts effective representative features of graph edges to address challenges such as information loss and handle multi-scale information. This approach utilizes embedding vectors generated by GNNs from enclosing subgraphs. While expanding GNN layers can capture more intricate relations, it often leads to overs-smoothing. To mitigate this issue, we propose constructing coarse-grained graphs at three distinct scales to uncover complex relations. To apply multi-scale subgraphs in GNNs without using pooling layers that lead to information loss, we convert each subgraph into a line-graph and reformulate the task as a node classification problem. The hierarchical structure facilitates exploration across various levels of abstraction, fostering deeper comprehension of the relationships and dependencies inherent within the graph. 
The proposed method is applied on link prediction problem, which can be modelled as a graph classification problem. We perform extensive experiments on several well-known benchmarks and compare the results with state-of-the-art link prediction methods. The experimental results demonstrate the superiority of our proposed model in terms of average precision and area under the curve." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "link prediction", "graph neural network", "multi-scale graph", "line graph", "complex network." ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/50c2c85a5ce02e6c0b4f5a56dae9d624464da8ad.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/11bbf8248b61ce0b6ae21da75c8bdb677eb61639.zip" }, "title": { "value": "MLGLP: Multi-Scale Line-Graph Link Prediction based on Graph Neural Networks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0IhoIn0jJ3
Inference of Sequential Patterns for Neural Message Passing in Temporal Graphs
main
Active
graph neural networks;temporal patterns;higher order network;random graph ensembles
learning on graphs and other geometries & topologies
3;5;5;5
3;4;3;4
2;3;2;3
1;1;3;2
1;2;2;2
4.5
3.5
2.5
1.75
1.75
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Q1 The authors state that computational complexity may not be a limiting factor. Could the authors further clarify the complexity increased from DBGNN. How would they compare to standard temporal GNNs? Meanwhile, all datasets used for evaluation have less than 500 nodes, can the proposed method scale to larger graphs?\n- Q2 The results in Tabe 1 on synthetic data try to highlight patterns that only high-order models can discern. However, the results are not convincing or interpretable, especially the discussion of the baseline HONEM (even a strong one in Table 2) is very limited.\n- Q3 The proposed method claims to have better interoperability by introducing HYPA. Could the authors elaborate more on how it is made more expressive by not relying on the transitivity assumption?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The idea of augmenting the input graph for message passing using a statistical null model to detect abnormal temporal patterns and distinguish sequences beyond frequency is interesting.\n\n- The adapted HYPA offers an interpretable way to identify unusual sequences in dynamic graphs, and the proposed HYPA-DBGNN achieves improved performance over baseline models on multiple empirical datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies how to model temporal patterns in dynamic graphs and proposes to use statistical graph inference to identify sequence anomalies for graph augmentation and perform message passing on it to capture inductive biases of sequence patterns. The effectiveness of the model is tested on a synthetic dataset and five empirical datasets for static node classification." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The core techniques of using De Bruijn graphs and hypergeometric testing are well established in time series data analysis. The proposed HYPA-DBGNN is, to some extent, an interesting adaptation for GNNs.\n\n- Using De Bruijn graphs with statistical augmentation is a sound approach. However, the paper would benefit from more discussion on why it is optimal for this purpose under the setting for node classification on time-varying graphs, rather than simply improving from DBGNN.\n\n- The evaluation focuses on a limited set of small human interaction networks. Testing on a more diverse set of temporal datasets would better substantiate the model’s broader applicability and generalizability across domains." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- Figure 1: Why we construct a higher-order edge for count 0? Besides, shouldn't we have an arrow from (a) to (d) since we also need 1-order counts to construct 1-order graph in (d)?\n- Figure 1: You should extend the figure with how null model in (b) and weights in (c) are really generated, or provide in appendix? This figure fails to explain what you did for (b) and (c) given the poor explanation of section 4.\n- line 262: What is $X_{uv}$ and $f(u, v)$? Why they are independent from order $k$?\n- line 282: Shouldn't $H(v)$ rely on order $k$ based on your definition? Same to equation (1).\n- Page 6: Mix use of higher order nodes and nodes make the notation is bit hard to follow, recommend to replace $v$ by $v^{(k)}$ in all related content, or vector form $\\mathbf{v}$. Then, you can claim that $k = 1$ is omitted by default.\n- line 292: Why map $h^{1, 0}$ to $h^{k, 1}$ rather than $h^{k, 0}$?\n- line 295: Can you provide more explanation how this bipartition is analogous to Markov chain?\n- line 304: What is $g$?\n- Why this design is limited to temporal node classification? 
I think this architecture can be used for regression without any modification.\n- line 331-351: Hyperparamter configuration can be moved to appendix so that you can have more space to improve clarity of algorithm design sections.\n- Experiment: You are comparing with a lot of simple baselines for static graph with only on temporal graph baseline. Based on [1], static and temporal graph representation are indeed equivalent, especially you are performing static node classification on temporal graph. Why you don't compare with other basics such as GAT, GIN, TGAT, DySAT (see [1]), and other state-of-the-art like PNA, PINE, GraphTransformer.\n- Given that TGN is designed mainly for evolving graph, should you make some modification to make comparison fair? For example, average node representation of different timestamps for perform static node classification on temporal graph? \n- Your font looks different from template. I think you need to check if you are using the template correctly.\n\n[1] Gao, Jianfei, and Bruno Ribeiro. \"On the equivalence between temporal and static equivariant graph representations.\" International Conference on Machine Learning. PMLR, 2022." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This work focuses on static node property classification on temporal graph, which is task lack of exploring." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work focuses on relatively novel task, static node property classification for temporal graphs. Different from common trend of temporal graph neural networks, it proposes HYPA-DBGNN that extends a previous work GBGNN (which combines static hyper-order graph neural network on a high-order De Bruijn Graph constructed from time series) by null model correction." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The notation is lack of consistency, making it hard to follow and the clarify of method details being quite poor. (See questions)\n- This paper focuses on a rare task, which I think more real-world justification is needed? For example, what are real-word scenarios? You can pick one of your dataset to explain this in more detail.\n- The contribution of the proposal is slightly unclear. It seems that this work simply extends related work DBGNN by introducing null model correction. If my understanding is correct, I think more theoretical justification of the necessity of this correction should be provided, otherwise, the contribution seems to be limited.\n- In both the synthetic and real-world experiments, the variance are very large, makes me doubt if the problem is formalized correctly.\n- Compare to highly related baseline DBGNN, the experiment results is not quite impressive (confident interval overlaps a lot in many tasks). This makes the contribution of null model correction less sound given no theoretical justification of the necessity." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Does this new inductive bias lead to a provably more expressive GNN than previous temporal MPNNs?\n2. What is the run-time scaling of HYPA-DBGNN? 
All experiments were run on quite small graphs, so it's hard to understand how scalable of a technique this is.\n3. To what extent has hyperparameter tuning been performed?\n4. What explains HONEM's good performance in 5.1?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well organized and motivated.\n2. The problem of extracting complex relationships from transitions between vertices is an interesting problem with many industrial applications\n3. The experiments that are presented appear to be carefully performed and well motivated. The results as presented provide evidence that the method works." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work introduces a model termed HYPA-DBGNN, which seeks to improve the ability of a GNN in temporal settings to learn high-order time dependent interactions. HYPA-DBGNN has two components, HYPA which detects the ``surprise'' of observing a specific walk, and DBGNN which performs a hypergeometric walk feature extraction. The authors detail this model as an extension of DBGNN, and present experiments which show promising performance gains." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper is unclear in spots. For example, The concept of a De Bruijn graph is mentioned but its basic properties are not discussed.\n2. The mathematical notation is intricate and can be difficult to follow, with some symbols overlapping with standard symbols from the literature. For example, $H(v)$ is the sum of $HYPA$ factors but is traditionally the hidden representation for all vertices.\n3. The intuition for \n4. Minor typos and grammatical issues make the paper somewhat difficult to follow. For example, `fist` -> `first` on line 314. \n5. 
Experiments in section 5.2 seem to lack many modern baselines including CAWN, TGAT, DySAT, and others. I would recommend that the authors add additional baselines. Random walk GNNs such as RWGNN could be applicable here as well, as could transformer architectures.\n6. The experimental setup is unclear in spots, the baselines may have been untuned, and the graphs are small." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Could the authors explain the role of De Bruijn graphs?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper introduces De Bruijn graphs into temporal graph analysis, which I find to be a novel approach.\n\n2. The paper conducts extensive experiments to demonstrate the effectiveness of the proposed method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces HYPA-DBGNN, a graph augmentation architecture focused on temporal graph learning. It encodes sequential pattern dynamics in first- and higher-order De Bruijn graphs and corrects graph structures using anomaly statistics. 
HYPA-DBGNN computes HYPA scores via hypergeometric ensembles to assess edge frequency differences from a random model, adjusting weights to improve accuracy. It uses a multi-order message passing scheme with inductive bias, incorporating HYPA scores and ReLU activation while preserving graph sparsity to optimize efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.The paper's exposition is not very clear, with many key pieces of information relegated to the appendices.\n\n2.The paper does not clearly explain why the introduction of De Bruijn graphs enhances performance, making it seem more like a simple combination of existing methods.\n\n3.The explanation of the method is insufficiently clear; a framework diagram could be helpful." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024inference,\ntitle={Inference of Sequential Patterns for Neural Message Passing in Temporal Graphs},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0IhoIn0jJ3},\nnote={under review}\n}" }, "abstract": { "value": "The modelling of temporal patterns in dynamic graphs is an important current research issue in the development of time-aware Graph Neural Networks (GNNs).\nHowever, whether or not a specific sequence of events in a temporal graph constitutes a temporal pattern not only depends on the frequency of its occurrence.\nWe must also consider whether it deviates from what is expected in a temporal graph where timestamps are randomly shuffled.\nWhile accounting for such a random baseline is important to model temporal patterns, it has mostly been ignored by current temporal graph neural networks.\nTo address this issue we propose HYPA-DBGNN, a novel two-step approach that combines (i) the inference of anomalous sequential patterns in time series 
data on graphs based on a statistically principled null model, with (ii) a neural message passing approach that utilizes a higher-order De Bruijn graph whose edges capture overrepresented sequential patterns.\nOur method leverages hypergeometric graph ensembles to identify anomalous edges within both first- and higher-order De Bruijn graphs, which encode the temporal ordering of events. \nConsequently, the model introduces an inductive bias that enhances model interpretability.\n\nWe evaluate our approach for static node classification using established benchmark datasets and a synthetic dataset that showcases its ability to incorporate the observed inductive bias regarding over- and under-represented temporal edges. \nFurthermore, we demonstrate the framework's effectiveness in detecting similar patterns within empirical datasets, resulting in superior performance compared to baseline methods in node classification tasks. \nTo the best of our knowledge, our work is the first to introduce statistically informed GNNs that leverage temporal and causal sequence anomalies. \nHYPA-DBGNN represents a promising path for bridging the gap between statistical graph inference and neural graph representation learning, with potential applications to static GNNs." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "graph neural networks", "temporal patterns", "higher order network", "random graph ensembles" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/ffdedcbda286398e2daab30612a94beffaa3b0c3.pdf" }, "presentation": null, "primary_area": { "value": "learning on graphs and other geometries & topologies" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/dee936e80131a71d50645d4d4124e91e375ca53c.zip" }, "title": { "value": "Inference of Sequential Patterns for Neural Message Passing in Temporal Graphs" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0IqriWHWYy
Watch Out!! Your Confidence Might be a Reason for Vulnerability
main
Active
Confidence;Robustness;Natural Adversaries;Object Recognition
interpretability and explainable AI
3;3;5;5
4;4;3;5
1;2;2;3
1;2;2;2
2;2;3;4
4
4
2
1.75
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Applicability Across Architectures: The proposed method seems tailored primarily for convolutional neural networks (CNNs). A major gap lies in assessing how well SWAG might generalize to other architectures, such as Transformers, which have become prevalent in vision tasks. Expanding the discussion on generalizability or including Transformers in the experimental setup could enhance the study's relevance and adaptability to current deep learning trends.\n\n2. Novelty in Approach: While the study reinforces known concepts around calibration and robustness, these insights are not novel, particularly within the Bayesian deep learning community, where calibration’s role in improving robustness under adversarial scenarios is well-understood [refA, refB]. This limits the paper's contribution, as it primarily confirms existing knowledge rather than pushing the boundaries with a novel calibration approach. Introducing an innovative calibration technique, or a modified variant of SWAG tailored for robustness, would provide a more substantial contribution.\n\n3. Experimental Limitations: The experiments focus on CIFAR datasets and CNNs, which are both limited in size and scope. 
A broader evaluation involving larger datasets like ImageNet, and a wider range of architectures, including Transformer-based models, would offer a stronger validation of SWAG's effectiveness. This could also strengthen the paper’s generalizability claims and its relevance for real-world deployment.\n\nReferences\n\n[refA] Wicker, Matthew, et al. \"Bayesian inference with certifiable adversarial robustness.\" International Conference on Artificial Intelligence and Statistics. PMLR, 2021.\n\n[refB] Stutz, David, Matthias Hein, and Bernt Schiele. \"Confidence-calibrated adversarial training: Generalizing to unseen attacks.\" International Conference on Machine Learning. PMLR, 2020." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper presents a detailed investigation into the impact of calibration on model robustness, especially under naturally occurring corruptions. By systematically exploring the role of confidence in model predictions, the authors contribute to understanding the relationship between calibration and robustness, reinforcing SWAG’s potential in addressing natural corruption without introducing additional computational burden associated with adversarial training." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes using Stochastic Weight Averaging Gaussian (SWAG) as a method for calibrating neural networks, aiming to improve their performance and robustness against natural corruptions. The approach leverages SWAG's capacity to model uncertainty and enhance prediction reliability, asserting that better-calibrated confidence scores contribute to robustness in challenging real-world conditions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
Limited Novelty: The paper largely relies on the established SWAG technique without introducing new calibration methods or adaptations specific to the architecture or the problem of natural corruption.\n\n2. Experimental Scope: The experimental evaluations are confined to small datasets (CIFAR-10 and CIFAR-100) and convolutional architectures like VGG and ResNet, lacking analysis on larger-scale datasets and modern architectures like Transformers." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to the weakness." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. It explores the vulnerability of DNNs from the perspective of model overconfidence.\n2. The article is well-structured and relatively clear in its presentation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper explores the challenges DNNs face from natural adversarial corruptions, which can undermine their robustness. While past work has focused on detecting and mitigating these corruptions, this study examines whether a model’s confidence may contribute to its vulnerability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
What exactly is the novelty of this paper? SWAG is not your contribution; merely using it to derive some results for analysis does not suffice.\n2. The phenomenon of model overconfidence appears to be only a description in your paper. Do you have specific examples or experimental results to substantiate this claim?\n3. Is your method limited to CNN architectures? Given the prevalence of transformer-based models, a method solely applicable to CNNs may have limited relevance, and it appears you tested on a very small set of CNN models.\n4. Based solely on the text, I cannot appreciate the superiority of your method. Please provide comparative experiments with adversarial training methods, covering dimensions such as effectiveness and cost. Furthermore, does your method apply only to natural corruptions? How would it perform against adversarial samples?\n5. The experiments lack depth: (1) In terms of models, this paper tests only on VGG-16 and ResNet, which seems rather limited. Where are the tests on more advanced models? (2) In terms of datasets, you only used CIFAR-10 and CIFAR-100, yet experiments on ImageNet are also necessary." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Although the proposed method addresses natural corruption, its effectiveness against gradient-based adversarial attacks remains unclear. 
It is recommended that the authors conduct experiments involving FGSM, PGD, and C&W attacks to evaluate the method's performance under adversarial attacks. For example, performance under different noise magnitudes and different numbers of attack iterations could be assessed." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Proposing that high confidence might lead to model vulnerability in naturally corrupted environments is a novel perspective, differing from traditional defense methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the vulnerability of deep neural networks (DNNs) when facing natural corruptions (such as noise, blur, etc.) and proposes that the model's confidence could be an important factor contributing to this vulnerability. Experiments demonstrate a significant correlation between a model’s confidence and its robustness in handling corruption. The study primarily focuses on calibrating model confidence and employs the Stochastic Weight Averaging Gaussian (SWAG) method to enhance model robustness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This paper only conducts experiments on convolutional neural networks (CNNs), lacking tests on other network architectures such as ViT-B/16 or DeiT, to validate the conclusions about confidence and robustness across different model types. This would provide a more comprehensive demonstration of the method's applicability and effectiveness.\n\n2. This paper validates the robustness of the model to natural corruptions and its relationship with confidence using CIFAR-10 and CIFAR-100 datasets. 
However, the complexity of these datasets is relatively low, making it difficult to fully reflect the model's performance in real-world complex scenarios. It is recommended to conduct further experiments on more challenging datasets such as ImageNet-C or ImageNet-A, which include a broader range of corruptions (e.g., Gaussian noise, motion blur, weather-induced degradation, digital transformations) and better reflect the diversity and complexity of real-world applications. \n\n3. The paper mainly focuses on confidence calibration without an in-depth comparison with other advanced defense methods (such as adversarial training), which may weaken the practical applicability of this approach." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- For Figure 1, how is the reliability plot plotted? Specifically, which hyper-parameter is adjusted to control the confidence of the model, and how is it adjusted? \n- How does the proposed method perform on larger datasets like ImageNet-C? Can this method survive different set of natural corruptions other than those in CIFAR-10-C, e.g., ImageNet-P, ImageNet-$\\bar{C}$ [1]? \n\n\n\n[1] On interaction between augmentations and corruptions in natural corruption robustness. NeurIPS 2021." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- **Novel insight**: this paper is the first to leverage model calibration method to mitigate natural corruptions. \n- **Promising experiment results**: the leveraged SWAG method substantially improves the robustness of CNNs against natural corruptions. \n- **Well-written paper**: the paper is well-organized and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the correlation between the confidence of deep neural networks and their vulnerability to natural corruptions. Specifically, the authors leverage the model calibration method SWAG to construct a smoothed model. The parameter of this model is sampled and averaged from the estimated Gaussian distribution of several versions of model parameters recorded during training. The evaluation on the widely used natural corruption benchmark CIFAR-10-C for VGGNet and PreActResNet has shown the robustness of the smoothed model against natural corruptions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **Limited technical contribution**: the methodology in Section 3 is originally proposed in SWA and SWAG. This paper has not introduced further adjustment or improvement when applying the method to mitigating natural corruptions. \n- **Lacking theoretical analysis**: the experiments have shown the effectiveness of SWAG in improving the robustness against corruptions. However, no theoretical analysis is provided to help better understand the source of the robustness." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Assessing the Impact of Overconfidence on CNN Robustness" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024watch,\ntitle={Watch Out!! 
Your Confidence Might be a Reason for Vulnerability},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0IqriWHWYy},\nnote={under review}\n}" }, "abstract": { "value": "The tremendous success of deep neural networks (DNNs) in solving `any' complex computer vision task leaves no stone unturned for their deployment in the physical world. However, the concerns arise when natural adversarial corruptions might perturb the physical world in unconstrained images. It is widely known that these corruptions are inherently present in the environment and can fool DNNs. While the literature aims to provide safety to DNNs against these natural corruptions they have developed two forms of defenses: (i) detection of corrupted images and (ii) mitigation of corruptions. So far, very little work has been done to understand the reason behind the vulnerabilities of DNNs against such corruption. We assert that network confidence is an essential component and ask whether the higher it is, the better the decision of a network is or not. Moreover, we ask the question of whether this confidence itself is a reason for their vulnerability against corruption. We extensively study the correlation between the confidence of a model and its robustness in handling corruption. Through extensive experimental evaluation using multiple datasets and models, we found a significant connection between the confidence and robustness of a network." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Confidence", "Robustness", "Natural Adversaries", "Object Recognition" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/d41290c6d35bdcdbca32d04148c1f444f3f0ca6d.pdf" }, "presentation": null, "primary_area": { "value": "interpretability and explainable AI" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Watch Out!! Your Confidence Might be a Reason for Vulnerability" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0JOhLEf2bX
Proteome-wide prediction of mode of inheritance and molecular mechanism underlying genetic diseases using structural interactomics
main
Active
Mode of inheritance;Functional effect;Genetic diseases mechanism;Graph neural networks;Graph-of-graphs;Structural interactomics
applications to physical sciences (physics, chemistry, biology, etc.)
3;3;5
3;4;4
2;2;2
1;1;2
2;1;3
3.666667
3.666667
2
1.333333
2
0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In order to improve the paper, the authors could perform hyperparameter tuning and give more information on the hyperparams used. They could compare against a higher number of methodologies and provide better ways to convey the results (confusion matrices may help). Morevoer, a literature search to verify the enriched terms could be performed." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The main strengths of the paper are the following:\n\n1) The paper is well-written and easy to follow.\n\n2) The problems addressed in the paper are relevant\n\n3) It is extremely interesting to have a methodology able to address both MOI and functional effects prediction instead of needing to rely on two different strategies for the two tasks.\n\n4) The bioinformatics-related work and processing is accurate.\n\n5) The figures provided help convey the message of the authors more effectively." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present a methodology able to detect both mode of inheritance (MOI) of proteins encoded by autosomal genes and the functional effects of gene variants. 
The strategy relies on established architectures like GCN, GAT, and GIN, using protein-protein interaction (PPI) data for MOI prediction (node classification) and protein structures obtained by AlphaFold for function prediction (graph classification). The author compared their method with two established strategies, one for MOI prediction (LDA) and one for functional effect prediction (SVM). The results reported by the authors show better metrics for their methodology. To inspect the biological validity of their results, the authors performed an enrichment analysis and determined the most influential features for the predictions via XAI." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main weaknesses of the paper are the following:\n\n1) The authors did not provide any code. This impinges on the reproducibility and further evaluation of their methods and results.\n\n2) From the methodological point of view, there seems to be not much novelty. The authors use established architectures \"out-of-the-box\" to tackle the proposed tasks.\n\n3) It seems that the authors did not perform any parameter tuning on their models. Additionally, no information on the hyperparameters used in the model is provided. The authors state they use dropout and weight decay, but no value for those hyperparameters is shown.\n\n4) By reading the paragraph \"Training and evaluation,\" it seems that the authors split the dataset into just two sets and not into training, validation, and test sets. They probably only used training and test sets if they did not perform hyperparameter tuning.\n\n5) Regarding the explainability phase, IntegrateGradients was used to obtain global feature importance attributions by averaging the attributions of correctly predicted samples. I am not sure this is the correct approach to obtain global feature attributions. 
Leaving out the wrongly predicted samples from the averaging process may produce biased results. I suggest using global feature attribution methods instead. One example can be SAGE (Covert, Ian, Scott M. Lundberg, and Su-In Lee. \"Understanding global feature contributions with additive importance measures.\" Advances in Neural Information Processing Systems 33 (2020): 17212-17223), among others.\n\n6) Comparing against just one methodology per task (LDA and SVM) seems to me not enough to evaluate the performance of the strategy.\n\n7) Given the tasks are multiclass classifications on unbalanced datasets, showing the results in terms of precision, recall, and F1 only without specifying the type of averaging strategy (micro, macro) used or without providing a confusion matrix conveys too little information to really understand the accuracy of the method (in particular class-wise).\n\n8) The enrichment analysis reports only the enriched terms, but there are no links to the literature that confirm or better describe the association between the enriched terms and the proteins.\n\nOverall, given the strong bioinformatics focus, I believe that after some revisions the paper can be accepted in a more specialized venue/journal, but given the limited methodological contribution and the flaws/imprecisions in model training and evaluation, I am afraid the work is not ready for publication in a high-impact machine-learning-focused conference at its current state." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- the features for the nodes at each scale seem to be engineered and not learnable: is it true?\n- couldn't one learn a graph encoding from the residue level and add it to the features at the protein interaction scale?\n- all empirical results are reported without a notion of dispersion; is it possible to repeat the experiments to get a measure of variance to understand the significance of the results?\n- when comparing multiple approaches could you use a critical diagram of differences (e.g. https://scikit-posthocs.readthedocs.io/en/latest/generated/scikit_posthocs.critical_difference_diagram.html)\n- Page 8 lines 417: why are the results notable? what would the enrichment analysis of a random set of protein yield instead? how about a non-random baseline, e.g. a nearest neighbour predictor." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The integration of information at multiple scales is of interest." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose an approach to predict the likelihood for a protein to result in a disease if a mutation occurs on one of the inherited copies using a graph neural networks method. They propose to use two scales to create a graph of graphs representation: at a protein level nodes are entire proteins and edges are interactions between proteins and at a residue level nodes are amino acids and edges are the type of bonds between these." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors do not present a method capable to integrate information at various scales but rather work independently at each scale without exploiting any form of communication between scales." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see the Weaknesses part. Thank you!" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The graph of graphs idea to predict the mode of inheritance of diseases is novel.\n\nThe methods are described in great detail and with persuasive experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper gives a framework to predict the mode of inheritance of diseases and classify dominant-associated proteins based on their functional effect. The biggest highlight is its use of a graph-of-graphs idea to combine the protein-protein interaction networks and high-resolution protein structure." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My biggest concern is that although this work seems to be relevant for predicting mode of inheritance and classifying functional effects, its contribution to deep learning models in the application domain of biology is insufficient. It is well known that GCN, GIN,GAT are three very classical GNN models.\n\nAnd, the graph in graphs idea is also similar to the idea of the paper [1]. So, as far as ICLR is concerned, I think this may not be a notable paper for the community. \n\nAlso, I would suggest that the author modify the size of each figure to make the content and fonts in the figures look a little more harmonious.\n\n[1] Gao Z, Jiang C, Zhang J, et al. Hierarchical graph learning for protein–protein interaction[J]. Nature Communications, 2023, 14(1): 1093." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We used a graph-of-graphs approach to combine protein-protein interaction network with protein structures. Then we used graph neural networks to predict mode-of-inheritance and functional effects." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024proteomewide,\ntitle={Proteome-wide prediction of mode of inheritance and molecular mechanism underlying genetic diseases using structural interactomics},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0JOhLEf2bX},\nnote={under review}\n}" }, "abstract": { "value": "Genetic diseases can be classified according to their modes of inheritance and their underlying molecular mechanisms. Autosomal dominant disorders often result from DNA variants that cause loss-of-function, gain-of-function, or dominant-negative effects, while autosomal recessive diseases are primarily linked to loss-of-function variants. 
In this study, we introduce a graph-of-graphs approach that leverages protein-protein interaction networks and high-resolution protein structures to predict the mode of inheritance of diseases caused by variants in autosomal genes, and to classify dominant-associated proteins based on their functional effect. Our approach integrates graph neural networks, structural interactomics and topological network features to provide proteome-wide predictions, thus offering a scalable method for understanding genetic disease mechanisms." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Mode of inheritance", "Functional effect", "Genetic diseases mechanism", "Graph neural networks", "Graph-of-graphs", "Structural interactomics" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6b312f4de5f079ef410b9f2b8d3244b64828237a.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Proteome-wide prediction of mode of inheritance and molecular mechanism underlying genetic diseases using structural interactomics" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0JcPJ0CLbx
Revisiting MAE pre-training for 3D medical image segmentation
main
Withdraw
self-supervised learning;medical image segmentation;foundation models;medical image computing;CNN;nnU-Net
unsupervised, self-supervised, semi-supervised, and supervised representation learning
Tassilo Wald;Constantin Ulrich;Stanislav Lukyanenko;Andrei Goncharov;Alberto Paderno;Leander Maerkisch;Paul F Jaeger;Klaus Maier-Hein
~Tassilo_Wald1;~Constantin_Ulrich1;~Stanislav_Lukyanenko1;~Andrei_Goncharov1;~Alberto_Paderno1;~Leander_Maerkisch1;~Paul_F_Jaeger1;~Klaus_Maier-Hein1
3;3;3;6
4;5;4;4
2;2;2;3
2;1;2;3
1;1;3;3
3.75
4.25
2.25
2
2
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "Given the Scores and the Reviews, we withdraw the paper from ICLR and will revise it.\nWe want to thank the reviewers for their time and their mostly constructive feedback. \n\n_Despite withdrawing, we believe some points of criticism are disputable and the following should be noted:_\n\n> Claim of first working MAE baseline without showing \"Self-supervised pre-training of swin transformers for 3d medical image analysis.\" and \"Masked image modeling advances 3d medical image analysis.\" are not working\n\nWe are certain that we are the first to show convincing results of MAE pre-training but agree that we should provide additional evidence of SwinUNETR and the other Transformer MAE Baseline being sub-par. We originally believed the known deficiencies of SwinUNETR and transformers in 3D medical image segmentation to be sufficient in itself, but will provide evidence in future versions. \n\n> Claiming to train on 44k volumes is misleading, as we filter down to 39k\n\nThis will be reworked in the future version. \n\n> Scaling should increase data and parameters\n\nOriginally this scaling was conducted to allow adaptation of the architecture on smaller consumer-GPUs as was stated in the manuscript. Depite this we agree that this scaling is suboptimal and that the naming convention is confusing. 
We will provide a better scaling scheme/paradigm and in the future.\n\n> Partially limited novelty as pre-trained models are not publicly available\n\nWe agree that public pre-trained weights would improve the contribution, hence we will provide pre-trained weights in a future version, created on the public 41k volume large ABCD dataset.\n\n> Missing reference to AMAES: Augmented Masked Autoencoder Pretraining on Public Brain MRI Data for 3D-Native Segmentation.\n\nWhile we would like to use this publicly available dataset, we want to denote that there is no simple way of obtaining it. Many singular data-usage requests need to be conducted, and singular datasets come with specific hurdles associated with use of their data. E.g. PPMI requires users to get papers administratively reviewer, `If I seek to publish manuscripts using data from PPMI, I agree to follow the guidelines established and written out in the PPMI Publications Policy, including sending manuscripts to the PPMI Data and Publications Committee (DPC) for administrative review.` https://ida.loni.usc.edu/collaboration/access/appLicense.jsp . 
Same goes for some datasets like OASIS-3, or ADNI: _\"If I publish manuscripts using data from ADNI, I agree to the following:\n`On the by-line of the manuscript, after the named authors, I will include ADNI as an author\nby using the phrase \"for the Alzheimer's Disease Neuroimaging Initiative*\" with the asterisk\nreferring to the following statement and list of names\"` Which the original paper even violates https://arxiv.org/pdf/2408.00640 \n\nHaving said this, we want to thank all the reviewers again for their time and effort.\nCheers" }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* How is the finetuning implemented? Does the finetuning use nnUnet or a nnUnet like framework?\n* Will the authors release pretrained weights and results on public data, such as Brains-45K?\n* The authors use a patch size of 160^3, however this is not standard by nnUNet. What is the performance improvement over using 128 or 96 standard in many previous works?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is a very timely and relevant contribution to the field of medical image segmentation where the use of self-supervised pretraining is still in its infancy. The paper is clearly written, and proposes a simple, yet effective framework for pretraining for 3D medical segmentation downstream tasks. The analysis of design choices contains valuable insights. The evaluation is thorough, and compares to the most important baseline methods. While not a novel method, getting the sparse convolution MAE to work in 3D is non-trivial, and making the implementation of this public is a sizeable contribution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a SSL framework, called nnSSL, for 3D medical image segmentation based on a MAE strategy and thorough evaluation of various design choices. The paper pretrains on a dataset of 44K private MRI and designs a SSL framework using 5 public datasets and uses 7 public datasets for further evaluation and comparison to SOTA methods." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* (W1) The paper does not provide important details on how the weights are transfered for finetuning. Is finetuning performed in the nnUnet framework? Which augmentations are used when finetuning? Are the learning-rate, augmentations etc. fixed for all evaluation datasets? As noted by the authors, selecting an appropriate configuration for each dataset is important. I assume that the configuration is also dynamic for S3D, however the paper does not contain any mention of how this is achieved with pretrained weights.\n* (W2) The authors use a patch size of 160^3 which is significantly larger than most previous works, however does not provide any ablations of the effect of this. The proposed performance gains therefore cannot be ruled out to be mainly from using a larger patch size.\n* (W3) The paper lacks references to important related work. Specifically, the authors are suggested to include the following two articles in the related works section:\n - SSL with a convolutional MAE for 3D MRI segmentation on a **public** dataset of 44K MRI scans, which similarly revisits various design choices for CNN pretraining, yet with inferior evaluation: [1]\n - Implementation of Spark-like sparse masking for large-scale 3D medical image pretraining: [2]\n* (W4) The notes on scaling and the S3D-L variant is misleading since it does not use a model of larger size, yet is scaled in other ways. This meaningfully departs from the established literature, and the authors are encouraged to find another way of communicating the different training setup. Scaling the model and data sizes are important ingredients in compound scaling, yet none of these are performed.\n* (W5) The pretraining dataset is private and only limited information on the nature of this dataset is included. 
For reproducibility purposes, it would be beneficial for the community if the authors would release checkpoints trained on Brains-45K (similar size to the used dataset) from [1].\n* (W6) The abstract mentions pretraining is on a dataset of 44K 3D MRI volumes, however the actual pretraining dataset is 39K volumes after filtering out low-quality data. This discrepancy is misleading. \n\nReferences:\n\n[1] Munk, Asbjørn, et al. \"AMAES: Augmented Masked Autoencoder Pretraining on Public Brain MRI Data for 3D-Native Segmentation.\" _arXiv preprint arXiv:2408.00640_ (2024).\n\n[2] Tang, Fenghe, et al. \"Hyspark: Hybrid sparse masking for large scale medical image pre-training.\" _International Conference on Medical Image Computing and Computer-Assisted Intervention_. Cham: Springer Nature Switzerland, 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "What is the reason for excluding the existing 3D MAE SSL pretraining frameworks for medical images from Table 3?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Benchmarking SSL pretraining strategy is absolutely important in all fields of AI, including medical vision. \n2. This involves a large-scale pretraining dataset, ~40k brain scans. \n3. 
The downstream evaluation sets are also diverse. \n4. The presentation is easy to follow, but it certainly can be further improved." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to benchmark various mask autoencoder (MAE) or masked image modeling (MIM) pretraining configurations to determine the optimal one for 3D medical image segmentation using CNN. It collected a large-scale MRI dataset containing around 40k scans for pretraining. The pre-trained model was then applied and evaluated on 13 datasets/tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The reviewer rated soundness as 2 and contribution as 2, given the following reasons:\n\nSoundness:\n \n1. Given the current experiment setups, it is insufficient to conclude the optimal pretraining strategy. \n* The patch size of MAE is a critical parameter, while Kaiming's MAE paper [1] did not ablate on that, some other studies ablated on that parameter and found significant performance differences [2-4]. This paper utilized a patch size of 5x5x5 in the bottleneck, equivalent to 32x32x32 in image space. It seems **too large** for the 3D MAE. Both [3] and [4] indicate in the 3D medical image, a high masking ratio and a small patch size are the key ([3] used a patch size of 16x16x16, [4] used 8x8x8). \n* Regarding scaling of MAE pretraining, this paper only investigates having 8x batch size, larger learning rate, and 4x training iterations. Those are not keys to evaluating scaling. Scaling more refers to performance gain with an **increase in data size** and an **increase in model parameters**. On high pretraining iterations, the impact of larger batch size and learning rate may not be significant. Extending training iterations may also not help as the MAE training tends to saturate after prolonged training ([1] Fig 7 upper, 800 vs. 1600 epochs are very close, 84.9 vs. 85.1). 
So what will be really interesting to see is to ablate on 1. **training on 10%, 25%, 50%, 75%, 100% of 40k pretraining datasets**; 2. **varying model's depth to see how performance changes with model size**. In addition, the naming of S3D-L is very **misleading**, as -L always indicates a larger model (with more parameters) in the ML naming convention. \n\nThe above two reasons lead to a rating of soundness of 2, as without experiments on those two perspectives, it is hard to conclude the current manuscript presents the optimal strategy. \n\nContribution: \n\nThe reason for a rate of 2 in the contribution is that the current manuscript, entitled 'Revisiting MAE pre-training for 3D medical image segmentation', did not include any comparison with previous studies that utilized MAE pretraining for 3D medical image analysis, notably [3, 5]. Instead, it only involves comparisons with Model Genesis, Volume Fusion, and VoCo. \n\nThe contribution of the current study will be much higher if compared to the existing 3D MAE pretraining framework developed for medical images (i.e., [3,5]). \n\nOthers:\n\nThe quality of Fig. 2 can be improved. \n\n**Overall**, the reviewer recommends rejection because the technical flaws and a lack of comparison with existing 3D MAE frameworks (as presented above) outweigh the benefits brought by large-scale datasets and diverse downstream evaluations. \n\n* [1]: He, Kaiming, et al. \"Masked autoencoders are scalable vision learners.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2022.\n* [2]: Xie, Zhenda, et al. \"Simmim: A simple framework for masked image modeling.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2022.\n* [3]: Chen, Zekai, et al. \"Masked image modeling advances 3d medical image analysis.\" Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. 2023.\n* [4]: Zhang, Xuzhe, et al. 
\"MAPSeg: Unified Unsupervised Domain Adaptation for Heterogeneous Medical Image Segmentation Based on 3D Masked Autoencoding and Pseudo-Labeling.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024.\n* [5]: Tang, Yucheng, et al. \"Self-supervised pre-training of swin transformers for 3d medical image analysis.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2022." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- Should the captions for the tables be placed above the tables instead of below?\n- Should the writing issues and figure problems mentioned in the Weakness section be revised?\n- Can an explanation be provided for the performance degradation observed with scaling up (Table 3)?\n- Does the final model's performance degrade when sparsification adaptations are not used?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The authors conducted a substantial number of experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors identified three key issues in 3D medical image computing and proposed corresponding solutions. 
They employed the Masked Auto Encoders (MAE) method for pre-training the model within the existing framework, achieving better performance compared to previous SSL methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper resembles a technical report rather than an academic paper, lacking demonstration of its innovation. It addresses the problem by simply combining methods without discussing the essence of the issue. MAE has already proven its competitiveness in previous work, yet the paper merely applies MAE to the backbone without further exploration.\n- The writing quality is poor, especially with the confusing use of symbols (e.g., the confusion of D[1\\~9], DS[1\\~9], and dataset names). The excessive use of items and textbf (too much bold text reduces readability) and quotes (all right quotes, which can be displayed correctly in $\\LaTeX$ using \\`word') makes the paper difficult to read.\n- The paper lacks a symbolic representation and adequate explanation of the task setup and model, instead focusing extensively on dataset selection and hyperparameter choices.\n- The figures are confusing. Figure 1 is hard to understand, appearing to mix development and testing in a workflow without showing the model pipeline. Figure 2 is poorly organized, with excessive whitespace and mixed use of pie and bar charts. Figure 3 seems to be generated directly by a program, lacking good organization and sufficient explanation, with a lot of meaningless whitespace.\n- The paper lacks visualizations of some results.\n- The experimental section only describes performance improvements and changes in tables without further discussion. 
The results show that the model does not achieve significant performance gains in many experiments (the large model size yields only slight improvements or none at all), suggesting that simply applying MAE does not produce sufficiently good results, and the authors do not propose better methods.\n- From Table 1-a, it can be observed that model performance improves based on some sparsification adaptations, raising doubts about whether the results in Table 3 are achieved by stacking tricks rather than the method itself. Table 1-c shows no performance improvement from scaling, and Table 3 even shows performance degradation due to scaling, without explanation, which is disappointing for the method." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "## Questions\n- L213: \"When masking the input image, CNNs are not able to ignore the masked regions in the same manner as transformers can.\" Can you elaborate on this? (I also suggest doing so in the paper). Why would you want to ignore the masked regions? My understanding is that, the model should learn how to reconstruct those regions.\n- L249: \"a [3x3x3] convolution is applied to the feature maps at every resolution except the highest resolution to prepare the representations for decoding\". What do you mean by \"prepare\" here? why do they need to be \"prepared\"?\n- Table 1. What does the underlining indicate?\n- Table 2. 
What does it mean \"Tr. Stage\"? is it \"Fine-tuning stage\"?\n\n\n## Suggestions / Other comments\n- The title generalizes to \"3D Medical Image segmentation\" but the experiments are only on brain MRI. I suggest specifying that in the title.\n- In the abstract and introduction, the reader is introduced to the concept of \"development dataset\" (L20), which, to me, it wasn't clear until much later.\n- The contributions listed in the introductions were in the form of \"we evade Pitfall X by doing Y\". I don't think these are contributions. A contribution is something that you provide to the community, e.g., a novel method, an application, an answer to a research question, etc.\n- From the beginning of the paper it is advertised that the dataset size is 44k, although this number also includes the images that were discarded. The pretraining dataset size was 39k images, which is still quite large. I suggest saying that the dataset size was 39k and not 44k. Furthermore, the caption of Figure 1 reads \"During pretraining, we carefully develop our method with 44k images\" which seems to not be true; the dataset size is 39k.\n- Figure 1. The \"testing\" shows a \"frozen\" icon, but as far as I understood, the models are partially fine-tuned on the \"test datasets\".\n- Figure 1. It is unclear what \"underline\" means.\n- There is no \"Previous work\" section. Although in the introduction a lot of previous work have been cited, it was mostly to highlight the deficiencies of that previous work and not to explain what were the previous methods about. \"Previous work\" also gives context to the paper, and it helps introducing the methods that you will later compare. The methods in Table 1 (VoCo, VF, MG) seem to come out of nowhere, and they're \"Previous related works\". 
Also, in Section 4.1 \"Observations\", it is written \"SSL schemes using the masked image modeling paradigm (MG, S3D-B, and S3D-L) consistently rank higher than the contrastive VoCo or the pseudo-segmentation-based VolumeFusion pre-training method for CNN pre-training\", but the reader has never been told that those previous related works were based on different strategies, which is very important to understand why those methods were chosen.\n- To better illustrate the masking, I suggest including a figure where the reader can see how the input of the models looks like.\n- Figure 2. The text and numbers are a bit hard to read. I suggest increasing their size.\n- Typo in L266: \"Results are presented in Table Table 1b\"\n- Typo in L269: \"(S3D-B=\"\n- Typo in L204: \"betweem\"\n- L306: \"MAEs are known to benefit from scaling.\". I suggested including a citation.\n- I suggested having a separate section or subsection where the experiments and experimental settings are clearly defined.\n- The first line of the conclusion reads: \"This work is the first to demonstrate the potential of properly configured MAEs in 3D medical image segmentation\". However, by googleing \"masked auto encoder medical image segmentation\" many works pop up (e.g., [1,2,3,4]), and since there was no \"previous related work\" section, it is not clear if this is really \"the first to demonstrate the potential of properly configured MAEs in 3D medical image segmentation\"\n\n[1]: Self Pre-training with Masked Autoencoders for Medical Image Classification and Segmentation. ISBI 2023.\n\n[2]. Masked Autoencoders for Unsupervised Anomaly Detection in Medical Images. Procedia Computer Science 2023.\n\n[3]. Advancing Volumetric Medical Image Segmentation via Global-Local Masked Autoencoder. Arxiv 2023\n\n[4]. Self-supervised pre-training with contrastive and masked autoencoder methods for dealing with small datasets in deep learning for medical imaging. Sci. Rep 2023." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This work identifies and tackles three issues regarding the evaluation of previous methods: small dataset size, inadequate backbones, and insufficient evaluation.\n- The pretrained model was evaluated on several datasets with different down-stream segmentation tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a framework based on self-supervised learning in which a large dataset of 3D brain MRI images is leveraged. The model resulting from this framework was fine-tuned and evaluated on various down-stream tasks, yielding segmentations more accurate than other state-of-the-art models such as nnUNet." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Very unclear and unorganized manuscript. I believe that it can be improved substantially. I specified many details in \"Suggestions\", including the following: previous related works were not described, unclear concepts are not introduced, parts of what should be the conclusion (e.g., that MAEs dominate, SSL pretraining works) are in the \"Results and discussion\" section, there is no section/subsection where the experiments are clearly described and instead they're mixed with \"results and discussion\". Another example of mixing: right before the conclusion, in only one paragraph (L508-516), we can find an experiment description, the results, and the discussion, all mixed together.\n- Limited novelty.\n - Limited methodological novelty. The framework is based on well-established Masked AutoEncoders \"with the recent adaptations introduced by Tian et al. (2023); Woo et al. (2023)\".\n - Partially limited application novelty since the pretrained models are not publicly available. 
Although the code is shared, researchers may not have access to large datasets; L53 reads that there seems to be \"a public decrease in the community’s willingness to share data\" (I don't agree or disagree with this statement, but this may be only regarding brain MRI).\n- In many cases, it is unclear if one approach is better than another because no standard deviations are shown. In other words, it cannot be understood whether a method achieving a 71.66 dice coefficient is actually better than another method achieving 71.35." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We revisit MAE pretraining for 3D medical image segmentation for state-of-the-art CNNs and show it's efficacy relative to currently established SSL methods in the medical domain." }, "_bibtex": { "value": "@misc{\nwald2024revisiting,\ntitle={Revisiting {MAE} pre-training for 3D medical image segmentation},\nauthor={Tassilo Wald and Constantin Ulrich and Stanislav Lukyanenko and Andrei Goncharov and Alberto Paderno and Leander Maerkisch and Paul F Jaeger and Klaus Maier-Hein},\nyear={2024},\nurl={https://openreview.net/forum?id=0JcPJ0CLbx}\n}" }, "abstract": { "value": "Self-Supervised Learning (SSL) presents an exciting opportunity to unlock the potential of vast, untapped clinical datasets, for various downstream applications that suffer from the scarcity of labeled data. While SSL has revolutionized fields like natural language processing and computer vision, their adoption in 3D medical image computing has been limited by three key pitfalls: Small pre-training\ndataset sizes, architectures inadequate for 3D medical image analysis, and insufficient evaluation practices. We address these issues by i) leveraging a large-scale dataset of 44k 3D brain MRI volumes and ii) using a Residual Encoder U-Net architecture within the state-of-the-art nnU-Net framework. 
iii) A robust development framework, incorporating 5 development and 8 testing brain MRI segmentation datasets, allowed performance-driven design decisions to optimize the simple concept of Masked Auto Encoders (MAEs) for 3D CNNs. The resulting model not only surpasses previous SSL methods but also outperforms the strong nnU-Net baseline by an average of approximately 3 Dice points. Furthermore, our model demonstrates exceptional stability, achieving the highest average rank of 2 out of 7 methods, compared to the second-best method’s mean rank of 3. Our code is made available here." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Tassilo_Wald1", "~Constantin_Ulrich1", "~Stanislav_Lukyanenko1", "~Andrei_Goncharov1", "~Alberto_Paderno1", "~Leander_Maerkisch1", "~Paul_F_Jaeger1", "~Klaus_Maier-Hein1" ] }, "authors": { "value": [ "Tassilo Wald", "Constantin Ulrich", "Stanislav Lukyanenko", "Andrei Goncharov", "Alberto Paderno", "Leander Maerkisch", "Paul F Jaeger", "Klaus Maier-Hein" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "self-supervised learning", "medical image segmentation", "foundation models", "medical image computing", "CNN", "nnU-Net" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": { "value": "wald|revisiting_mae_pretraining_for_3d_medical_image_segmentation" }, "pdf": { "value": "/pdf/26d1c4da9478b76b6d8ec1ff3adf16309700a83c.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Revisiting MAE pre-training for 3D medical image segmentation" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0JjsZC0w8x
COrAL: Order-Agnostic Language Modeling for Efficient Iterative Refinement
main
Active
autoregressive large language modeling;decoding;iterative refinement
foundation or frontier models, including LLMs
3;6;6;6
4;2;3;5
2;4;3;4
2;3;3;2
3;2;2;4
5.25
3.5
3.25
2.5
2.75
-0.258199
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- If the way to generate tokens in the first step is different from that in the process of iterative refinements? Are there any better methods to generate draft tokens." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This paper is well-writen and easy to follow. \n- The performance on logical reasoning tasks are good." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes Context-Wise Order-Agnostic Language Modeling (COrAL), which incorporates iterative refinement directly into the LLM architecture while maintaining computational efficiency. Empirical evaluations on reasoning tasks demonstrate that COrAL improves performance and inference speed, and results on code generation indicate a drop in pass rates due to inconsistencies in order-agnostic outputs, highlighting the inherent quality–speed trade-off." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I think this paper is similar to the other type of works, i.e., speculative decoding, what the difference between them? 
\n- The noverty is limited, since the specific ways for iterative refinements, the training methods to learn correction, are borrowed from previous works. \n- The significant one: this method seems to only work in specific tasks, the logical reasoning tasks in this paper. However, we always focus on the generalization of current language models, i.e., the competitive on a wide range of tasks." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "The clarity of the paper is good, it's easy for people to follow generally. I don't have further questions." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- Improved Efficiency and Performance: COrAL’s order-agnostic framework allows simultaneous forward and backward processing, significantly reducing inference latency compared to traditional autoregressive models. 
Compared to the ablated baselines, empirical results on datasets like GSM8K and LogiQA demonstrate notable accuracy gains, confirming the model’s effectiveness in complex reasoning tasks.\n- Scalably Adaptable from Existing Models: By using context-wise modeling and target-aware positional encoding, COrAL manages to enhance dependency capture without substantially increasing computational resources, making it feasible for deployment in large-scale applications, even with existing large language models with only minor adaptation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes COrAL(Context-Wise Order-Agnostic Language Modeling), a novel architecture for language modeling that enhances efficiency in iterative refinement, aiming to reduce inference latency in large language models (LLMs). Traditional autoregressive models, which generate text sequentially, struggle with efficiency due to the natural linear time complexity in inference. COrAL incorporates iterative refinement directly into the model, allowing multi-token generation and backward reconstruction within manageable context windows. This order-agnostic approach enables simultaneous forward and backward decoding within sliding context windows, effectively accelerating inference and improving performance on reasoning tasks. Empirical tests show significant improvements in both accuracy and inference speed, demonstrating COrAL's promise in capturing diverse token dependencies without the high latency typical of AR models. However, challenges remain, such as reduced performance in code generation due to output consistency issues, indicating areas for further refinement." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Lack of survey of some (maybe kind of obsolete yet important) existing methods: This method resembles Scheduled Sampling in multiple aspects, yet it severely lacks the acknowledgement of this method (no citation nor even mentioning). It shares many ideas and practices with SS, necessitating a deeper analysis on the connection and differences between the method. For example, I'd recommend the authors to emphasize the capability of the proposed method on semi-parallel, refinitive generation, whereas SS was originally only proposed for improvements of performance in sequential generation. \n- Lack of deeper discussion on the theoretical insights: I appreciate the authors' awesome work in presenting and delivering the empirical results, but I presume it would appeal the community more if some insightful conclusions can be presented alongside the experiment observations." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "It would be really interesting to check how much performance is lost by starting from a pretrained model as compared to full training a method employing coral from scratch. Do you think that some performance is left on the table because you start from a pretrained model? 
\n\nIn the main result part, to increase my rating I would like to see a comparison to other interative refinement methods that have a similar computational cost as the w/o multi-token prediction variant of the proposed method and also a more detailed description of the autoregressive baseline.\n\nSuggestion: Maybe it would be a good idea to incorporate an application in which this method shines. E.g., by looking into domains that can benefit from the order-agnostic aspect such as protein language modelling." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* the authors propose an interesting paradigm and show that it has promise for reducing computational cost and enhancing performance in certain settings\n* the method is applicable to autoregressive pretrained language models and seems to improve their performance in certain settings\n* the authors provide a quite extensive ablation study for their method\n* the paper contains some beautiful figures such as figure (2) and (3). Even though Figure (2) is a little bit unclear to me. Why are there seemingly different offsets for the refinements and why is there not much visual seperation inbetween forward prediction and refinement?" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a novel decoding strategy combining autoregressive modelling with ROBERTA-like order agnostic refinement. Given a partial sequence, they predict multiple tokens ahead, which they subsequently refine using ROBERTA-like denoising autoencoder. The authors see performance improvements on GSM8K and LogiQA and poor performance on code generation." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* pseudo-code for Algorithm 1 is provided without walking through the pseudo-code\n* in the experimental section the baselines are not described in enough detail, just AR. the proposed method requires finetuning, are the AR baselines also finetuned on the tasks?\n* the by-far-best performance is achieved using the w\\o multi-head prediction ablation, which is not the proposed method and thus weird. I assume this variant suffers from increased computational cost compared to the proposed method. It would be interesting to compare this ablation with a method from the related work that has a similar computational cost.\n* comparison to refinement methods from the related work is missing\n* a somewhat non-standard notation for expected values is used. their subscripts seem to be used much like in summations, but usually subscripts at an expected value are used to indicate over which distribution the expectation is taken: e.g., equation (1) and equation (3)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In eq.8 entropy is always positive, so -H(x) is always negative and exp(-H(x)) is always less than 1. So min(a,a*exp(-H(x))) is always a*exp(-H(x))." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. The topic of the paper is interesting, transformer-based model do have the problem of slow decoding speed. \n\n2. It make a good balance between the speed and the performance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a new decoding method, called CORAL, which can speed up the decoding process and maintain (or upraise) the performance of the model in some tasks. CORAL has 2 parts: prediction and verification. The experiment shows that the verification part can help the model to generate more accurate results. CORAL also designed a strategy named \"multi-forward\" to speed up the decoding process (although it may hurt the performance). The result shows that the CORAL is useful in math problems but is useless in the code generation task." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The improvement of the CORAL is not generalizable enough. It only works well in the some math/logic problems but not in the code generation task.\n\n2. Although the speed of the decoding process is improved, it needs to use more GPU memory (and \"waste\" some computation because of verification and multi-forward) to achieve this. So it is not friendly to equipment that most people use." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024coral,\ntitle={{CO}r{AL}: Order-Agnostic Language Modeling for Efficient Iterative Refinement},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0JjsZC0w8x},\nnote={under review}\n}" }, "abstract": { "value": "Iterative refinement has emerged as an effective paradigm for enhancing the capabilities of large language models (LLMs) on complex tasks. However, existing approaches typically implement iterative refinement at the application or prompting level, relying on autoregressive (AR) modeling. The sequential token generation in AR models can lead to high inference latency. \nTo overcome these challenges, we propose **C**ontext-Wise **Or**der-**A**gnostic **L**anguage Modeling (COrAL), which incorporates iterative refinement directly into the LLM architecture while maintaining computational efficiency. Our approach models multiple token dependencies within manageable context windows, enabling the model to perform iterative refinement internally during the generation process. Leveraging the order-agnostic nature of COrAL, we introduce sliding blockwise order-agnostic decoding, which performs multi-token forward prediction and backward reconstruction within context windows. This allows the model to iteratively refine its outputs in parallel in the sliding block, effectively capturing diverse dependencies without the high inference cost of sequential generation.\nEmpirical evaluations on reasoning tasks demonstrate that COrAL improves performance and inference speed, respectively, achieving absolute accuracy gains of $4.6$\\% on GSM8K and $4.0$\\% on LogiQA, along with inference speedups of up to $3.9\\times$ over next-token baselines. 
Preliminary results on code generation indicate a drop in pass rates due to inconsistencies in order-agnostic outputs, highlighting the inherent quality--speed trade-off." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "autoregressive large language modeling", "decoding", "iterative refinement" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/76f3ec579890db3963854a91814f12af10b81768.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/c78173aac3e45206bf8b30d567efe1d5403585b5.zip" }, "title": { "value": "COrAL: Order-Agnostic Language Modeling for Efficient Iterative Refinement" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0JwxMqKGxa
Reinforcement Learning on Synthetic Navigation Data allows Safe Navigation in Blind Digital Twins
main
Active
Electronic Travel Aids;Virtual Environment;Semantic segmentation;Reinforcement Learning
applications to neuroscience & cognitive science
1;1;3;3;5;6
4;3;5;4;3;4
1;2;1;2;2;3
1;1;2;2;2;3
3;2;1;2;2;3
3.166667
3.833333
1.833333
1.833333
2.166667
0.021693
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "A lack of specialized navigation data for visually impaired individuals hinders progress in AI-driven assistive devices. To address this, the work presents a virtual environment that generates human-like navigation data using procedurally generated labyrinths. A convolutional neural network, trained with reinforcement learning and semantic segmentation on synthetic data, enables effective obstacle avoidance. But I have some concerns illustrated in the weaknesses section. Looking forward to seeing the response from the author for these questions." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "(1) A significant gap in the literature is identified concerning the use of navigation data to enhance Sensory Substitution Systems.\n(2) NavIndoor, an open-source software, is introduced for the computationally efficient generation of procedurally generated, obstacle-filled environments, enabling seamless integration with AI systems. 
NavIndoor supports the efficient creation of large-scale, human-like navigation datasets.\n(3) The study demonstrates that synthetic data enables the extraction of low-dimensional features for navigation by individuals with visual impairments.\n(4) It is demonstrated that applying basic morphological operators to synthetic semantic segmentation maps enhances performance in real-world conditions after training." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Limited access to specialized navigation data for visually impaired individuals remains a major obstacle in advancing AI-driven assistive devices. To address this challenge, this work introduces a virtual environment specifically designed to generate human-like navigation data from procedurally generated labyrinths. Utilizing reinforcement learning and semantic segmentation, a convolutional neural network was trained to perform obstacle avoidance based on synthetic data. The resulting model surpassed state-of-the-art backbones, including DINOv2-B, in accurately identifying safe pathways in real-world settings. Overall, despite training exclusively on synthetic data, the model successfully extracted features conducive to safe navigation in real-world conditions, potentially paving the way for innovative solutions to assist the visually impaired." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) The model's training primarily relies on synthetic navigation data from procedurally generated environments, which may not fully capture the complexity of real-world conditions. This could introduce limitations in the model’s generalization to unpredictable real-world scenarios.\n(2) While the paper claims some level of real-world transferability, there is limited discussion on effective domain adaptation techniques or extensive testing in real environments. 
This lack of robust domain adaptation could mean the model's performance may vary significantly when exposed to real-world conditions without sufficient adaptation.\n(3) The study lacks discussion on how well the model’s navigation cues (such as haptic or auditory feedback) are perceived and utilized by visually impaired users. Testing on real users could provide valuable insights into how user-friendly and effective the system is in practical assistive scenarios." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- I’m curious about the decision to use a CNN and D3QN rather than exploring more advanced architectures. If on-device computation efficiency is a concern, have you considered using pruning and quantization to optimize more complex models? \n\n- Have you tested the model on a portable device? The reported testing on an RTX 4090 at 179 FPS seems beyond the computational needs of the task and may not reflect real-world performance on a SSD, where hardware specifications vary significantly. Since efficiency was a key factor in selecting a simpler model structure, testing on a lower-power or portable device could provide more insight into practical deployment. Alternatively, a wireless solution might offer a way to handle intensive computation remotely, particularly in indoor settings where connectivity is reliable." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- small and compact model purely trained on segmentation map, good computation efficiency.\n\n- use synthetic data and follow a sim2real approach\n\n- potential impact on visually impaired individuals." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a method and a simulator for training sensory substitution devices aimed at facilitating safe navigation for visually impaired individuals. The authors employ reinforcement learning to handle obstacle avoidance tasks. Training is initially conducted in a simulated environment, where a semantic segmentation camera captures segmentation maps as input. For real-world application, an external segmentation model processes RGB images to generate similar segmentation maps, which are then fed into the navigation model. The authors design a simple, compact model that relies exclusively on segmentation maps, optimizing for computational efficiency. They evaluate the model’s performance both in simulation and on real-world datasets, comparing it with pre-trained state-of-the-art models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- in abstract, line 015, typo \"developped\" should be \"developed\"\n\n- It might not be necessary to build a stand alone simulation platform to achieve the task, the scene creation and data collection can definitely be accomplished using existing simulators. Many available simulators focus on photorealism but could still be effective for this task, as they have already demonstrated good results in sim and real. 
Therefore, I'm wondering if it worth the effort to develop a simulator just for this task.\n\n- segmentation categories are quite limited, which may not sufficiently capture the rich semantics required in complex, real-world environments.\n\n- It might not be a fair comparison to compare the model specifically for navigation task with general-purpose models trained for recognition." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "I think there is quite some work that needs not only the experiments and organization of the paper but also reiterating in the problem formulation." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Strengths:\n\n1. The figures are easy and intuitive to understand.\n2. The experiments performed are represented on visuals well." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors try to overcome limited real-world datasets or environments that are expensive to train by using simulators, but at the same time using limited real-world datasets to incorporate selective learning or domain transfer in the optimization pipeline." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Weakness:\n1. The problem motivation is clear to me and the problem formulation is also clear to me. But I don’t understand the connection between two. I think both of them are independent problems, in the sense that the authors could’ve just directly posed it as a sim2real navigation problem, whereas visual navigation in the world is: Can’t it be done so? Please correct me if I’m wrong.\n2. My research is in visual navigation, and from what I see, to put it briefly in words, this problem would have made a lot of sense 4-5 years ago when obtaining a real-world policy was expensive to learn in real-world environments. But with current sota visual navigation algorithms and realistic simulators, I think both algorithms trained in a wide variety of simulator data and models that have already incorporated lots of indicative knowledge and priors from large datasets (LLMs, VLMs, RTX, etc.) would generalize well to real-world tasks. I’d suggest the authors incorporate these models as the baselines instead of models that output some form of representations and then training policy on top of that.\n3. From what I see, the authors need to spend a bit more time in the manuscript presentation, not that there are too many typos, but I think the formatting and sizes of different figures with the text is not coherent and consistent\n4. The paper also has a lot of technical flaws in the experiment section; for example, the only strength I see is in table 2, but I fail to even understand what the full form of VCD is, let alone understand the technical aspects. I suggest the authors lay out their contributions well and elaborate on each of the specific contributions." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "- It's not okay to refer to blind people as \"blinds\" (line ~27) \n\n- I find it a little dubious to use 'blind people' as some target user groups without ever getting any input or user evaluation \n from the target group." }, "flag_for_ethics_review": { "value": [ "Yes, Other reasons (please specify below)" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Why did the authors not just use one of the many many indoor environment datasets that are already available (e.g., RoboThor. Matterport3D, Apple's Hypersim, Meta's Ego-Exo4D). Instead, their simulation framework has very very low visual realism which will affect the performance, when moving to real-world video data. Why not just train the model using one of these datasets that has high-quality realism? I'd suggest that the authors compare their approach using their custom environment to results using one or more of the existing datasets mentioned above. Additionally, I'd suggest the authors to discuss the specific advantages of their NavIndoor environment compared to existing datasets, particularly in relation to training navigation models for visually impaired users. User studies should be conducted in these comparisons.\n\n- Why did the authors not conduct any user study with blind people, so I'd consider the results pretty useless/irrelevant to the target user groups claimed in the title? 
I'd suggest that some user studies/evaluations with visually impaired participants for navigating in real world with and w/o this approach to strengthen the paper's claims and relevance to the target application.\n\n- There is not enough explanation about what a Sensory Substitution Device is, and this is the entire motivation of their paper. I suspect that most readers in the ICLR community would not know without explanation what a Sensory Substitution Device (SSD) is. I'd suggest that the authors add a dedicated subsection in the introduction or background to define SSDs and explain their relevance to the proposed research, and discuss how neural networks can be applied and used in this context clearly - possibly with some diagrams to illustrate the use case.\n\n- While the basic idea of the described approach is not flawed (train a model to do environment navigation, use that model to help users navigate in the real world), the proposed approach don't seem to offer any novelty over a large body of learning-based model construction and SLAM literatures, as well as many similar works published in robot mapping literatures. I'd suggest the authors compare with some of the latest works on SLAM or robot mapping techniques [1], Additionally, authors can more clearly articulate the novel aspects of their method in the context of existing literature on learning-based navigation against other mapping and SLAM\n\n- Can the authors perform an extensive comparison and user studies to justify against a large body of existing work on SLAM (see a recent survey in [1]) in the same context for guiding the visually impaired users? Using a semantic segmentation model to navigate through a (synthetic or real) indoor environment, and then evaluate such a model on pre-processed semantically-segmented images from a real-world indoor environment dataset? I'd suggest the authors to conduct a thorough user study on the target group (i.e. 
visually impaired), to support any meaningful claim on the key contribution of this paper. \n\n- In terms of robustness and generalization, what's the insight on why this approach would do any better than existing methods? I'd suggest the authors to provide more empirical evidence and/or theoretical justification for why their synthetic data generation and training approach leads to better robustness or generalization compared to existing methods, such as those mentioned in [1].\n\n[1] Deep reinforcement learning based mobile robot navigation: A review. https://ieeexplore.ieee.org/abstract/document/9409758" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The basic idea/concept of using synthetic datasets to train a learning model for navigation is fine and has in fact been done in some of the latest work on model constructions in CV/ML and SLAM in learning-based robotics, as well as many other CV/ML applications for learning-based robotics tasks (e.g. navigation, collision avoidance, driving/steering, etc)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper describes a RL method using synthetic data to train a model that can assist blind people in navigating through real-world environments." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The motivation about suggesting/implying to provide some forms of assistive tools for blind people seems completely irrelevant to the work presented in the paper. This paper presents a method to train a model for indoor navigation using semantic segmentation. There is no clear explanation on how the method is actually to be used by blind people as assistive devices for blind people to use for navigation (sensory substitution devices, as the paper claims). 
I suggest the authors to clarify the connection between the proposed work and the stated application. Please clearly explain how the semantic segmentation and navigation model could be integrated into an actual sensory substitution device. Please also discuss specific requirements of assistive technologies for the visually impaired that could inform them how to use the proposed research.\n\nI did not find the results to be particularly illuminating or superior, considering that the performance is more or less in the same range as existing SLAM algorithms for relatively controlled and simple environments.\n\nExamples shown also do not indicate any generalization capability to me. \n\nThe description also does not offer any rationale for high robustness either." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. Why didn't the authors use photorealistic simulators Habitat Sim and AI2Thor, which can solve indoor navigation problems, to train and validate their approach?\n\n2. Could the authors explain their rationale for developing NavIndoor rather than using existing environments like Habitat or AI2Thor? Are there specific advantages of NavIndoor for this task that are not provided by these other environments?\n\n3. Have the authors considered comparing their approach to more recent reinforcement learning methods, such as those used in Staroverov, A., et al. 
\"Skill fusion in hybrid robotic framework for visual object goal navigation.\" Robotics 12.4 (2023): 104? What specific benchmarks or challenges do they think would be most relevant for evaluating their system's performance?\n\n4. Why didn't the authors include any form of anonymized open source code in the article or supplementary materials?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. The authors talk in their article about the need to solve an extremely important and sensitive problem of creating intelligent navigation systems for visually impaired individuals. \n\n2. The authors developed and tested NavIndoor, an open-source software for the computationally efficient generation of procedurally generated, obstacle-filled environments, enabling seamless integration with AI systems." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "- The authors developed a virtual environment designed to extract various human-like navigation data from procedurally generated labyrinths.\n\n- Using reinforcement learning and semantic segmentation, authors trained a convolutional neural network to perform obstacle avoidance from input RGB data. They demonstrated that their model outperformed state-of-the-art backbones including DINOv2-B in safe pathway identification in the real world." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The overview of methods in Figure 1 requires some improvement: in the subfigure named \"Signal processing method\", the authors mention Machine Learning and Neural Networks separately, but Neural network training is also machine learning. The category \"Of which trained using blind specific data\" also looks strange.\n\n2. 
The Related work section does not contain a single paper from 2024, which is strange. It is necessary to explicitly indicate that there are no such works, or add them to the overview.\n\n3. The Q-network architecture proposed by the authors is very simple and it is unclear how it differs from existing models used in modern works on reinforcement learning. The authors should add an explicit mention of the differences in the caption to Figure 4. Please, compare specific aspects of author's architecture to existing models, or to highlight any novel elements that may not be immediately apparent from the figure.\n\n4. On page 8, Active Vision Dataset (AVD) is mentioned, but no reference to the source is provided. The authors need to explain what this dataset is.\n\n5. Figure 8 does not have labels for the values ​​on the vertical axis. They should be added.\n\n6. In the abstract and introduction, the authors say that their system and method are specifically developed for visually impaired individuals. However, the developed dataset, methodology, and experiments look as if they are solving a general navigation problem typical for intelligent agents (robots, etc.) using data from onboard sensors with a discrete action space. The authors need to explicitly clarify this in the article. Otherwise, the title of the article may mislead the reader.\n\n7. The usefulness of the developed solution is questionable due to the poor photorealism of the simulator used and the overly simplified formulation of the navigation problem. Photorealism is generally important for the quality of image-based navigation methods to effectively transfer to real-world environments.\n\n8. The English language of the article requires careful checking, for example, \"in\" does not look entirely correct in the phrase \"dedicated navigation data in visually impaired individuals\" in the abstract. The text contains unnecessary punctuation marks, for example, several dots in a row. 
Typo \"developed\" in the abstract." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "This is more advice than question, but the biggest thing this paper can do to increase its quality is to generate more convincing results. The easiest thing to try would be to put a camera on blindfolded real human participants and have them use the proposed models and baselines and see which allows them to avoid obstacles the best. Alternatively (if the IRB approvals for that are too hard to get), train an RL model that takes as input exactly the same sensory substitution that a visually impaired person would get from the model, and report how well it does at navigation tasks, both with the proposed model and the baselines." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "*This paper has the most thorough literature review I have ever seen in a conference paper (109, I counted! As many pages of citations as paper). I in particular really appreciate Figure 2 and the comprehensive classification of prior work on sensory substitution devices. This is great at giving a background on SSDs to an audience which may be more familiar with ML.\n\n*The motivation behind this paper is excellent. 
Helping the visually impaired see and navigate through human environments is incredibly important, and this paper does an excellent job of establishing why we should care about this work.\n\n*There are many papers in AI/ML/robotics dedicated to sim2real transfer, but in this paper it works fairly well. The simulation is fairly simplistic (at least visually) and the real evaluation environment uses images taken in the real world, so the transfer is no easy feet. The use of semantic segmentation masks as input to the model likely makes a big difference, however even so it is still non-trivial. I am surprised at the transfer success.\n\n*In the Linear Probing section, there is a comparison to several state of the art baselines. Comparing to baselines is incredibly important, so the comparison here is a good thing." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The goal of this paper is to propose and evaluate a methodology for learning a vision model that can act as a sensory substitution device for the blind/visually impaired. The paper proposes a method for generating simulation data, which is then used to train a model. This model determines where it is safe to travel, specifically forward, left, right, or backward. The model takes as input a semantic segmentation of the scene and the action history, and outputs the Q-value of each of the 4 actions. The results show that the best model can achieve nearly 75% human performance in simulation, and on the real dataset the output of the value function is strongly correlated with distance to navigation boundary. This suggests that it can act as a good aide for a visually impaired person moving through an indoor space." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "*The biggest weakness with this paper is that the results are just not convincing. 
The numbers presented are 1) comparison to human-performance on the sim task (human having full visual sight), 2) distance to navigation boundary as compared to output of the value function, and 3) AUC comparison to the baselines (note that AUC is never defined in the paper, and the abbreviation is never clarified (I assume it stands for area under the curve?)). If the goal is to assist visually impaired people navigate, how do these numbers show that? \nThey are very indirect. Why not try actual navigation tasks with the proposed model and baselines? To convince me this model is actually useful, I need to see more directly applicable results.\n\n*The paper claims the value function is an indicator of safety and guidance (Figure 4 caption). This is not well-justified. Why should the arbitrary “best possible reward the agent can get from a given state” be the same as whether or not that state is safe?\n\n*There are several missing citations. SegFormer is never cited. Furthermore, the Active Vision Dataset is not cited. Where did it come from? Who collected it? What kind and how much data is there? How are you estimating distance to a navigation boundary?\n\n*The creation of the simulated data is not explained well enough. It seems only the Figure 3 caption gives any data on this, and that is not much. How is DFS used to place the obstacles and collectibles? \n\n*Also, why are there collectibles? That seems like a random addition to the simulated data. If your goal is to get the agent to explore, there are many ways in the RL literature to motivate exploration (e.g., maximum entropy)." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This study describes an innovative method for extraction of low-dimensional cues for navigation in blinds." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024reinforcement,\ntitle={Reinforcement Learning on Synthetic Navigation Data allows Safe Navigation in Blind Digital Twins},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0JwxMqKGxa},\nnote={under review}\n}" }, "abstract": { "value": "Limited access to dedicated navigation data in visually impaired individuals is a significant bottleneck for developing AI-driven assistive devices. For this purpose, we have developped a virtual environment designed to extract various human-like navigation data from procedurally generated labyrinths. Using reinforcement learning and semantic segmentation, we trained a convolutional neural network to perform obstacle avoidance from synthetic data. Our model outperformed state-of-the-art backbones including DINOv2-B in safe pathway identification in real world. In conclusion, despite being trained only on synthetic data, our model successfully extracted features compatible with safe navigation in real-world settings, opening new avenues for visually impaired." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Electronic Travel Aids", "Virtual Environment", "Semantic segmentation", "Reinforcement Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/ff1b88783eb9136c1b1ca7f0f3947060808e49ea.pdf" }, "presentation": null, "primary_area": { "value": "applications to neuroscience & cognitive science" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Reinforcement Learning on Synthetic Navigation Data allows Safe Navigation in Blind Digital Twins" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0K0hoNL9sx
Quantifying the similarity of information contained in probabilistic latent spaces
main
Withdraw
Information theory;representation learning;disentanglement
unsupervised, self-supervised, semi-supervised, and supervised representation learning
Kieran A. Murphy;Sam Dillavou;Danielle Bassett
~Kieran_A._Murphy1;~Sam_Dillavou1;~Danielle_Bassett1
0
0
0
0
0
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "We found a bug in the calculation of the mutual information between full latent spaces (Fig 4), and the relevant results change significantly enough that we feel resubmission is necessary." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": { "value": "We compare the information content of probabilistic representation spaces, and use it to analyze VAE latent spaces and to perform ensemble learning." }, "_bibtex": { "value": "@misc{\nmurphy2024quantifying,\ntitle={Quantifying the similarity of information contained in probabilistic latent spaces},\nauthor={Kieran A. 
Murphy and Sam Dillavou and Danielle Bassett},\nyear={2024},\nurl={https://openreview.net/forum?id=0K0hoNL9sx}\n}" }, "abstract": { "value": "In contrast to point-based representation spaces, probabilistic representation spaces have a well-defined sense in which they compress information about a dataset.\nWhen viewing representation spaces as communication channels, it becomes natural to ask about the similarity of information content of different representation spaces.\nStarting with classic measures of similarity of hard clustering assignments, we propose a natural modification that generalizes to probabilistic representation spaces.\nWe also propose a practical route toward estimating the similarity measure based on fingerprinting a representation space with a sample of the dataset that is applicable when the transmitted information is only a handful of bits.\nEquipped with the similarity measures, we build upon model centrality as a signature of unsupervised disentanglement by assessing ``channel centrality'' and finding information fragments that are repeatedly learned in VAE and InfoGAN ensembles.\nAdditionally, we evaluate the diversity of information content of the full latent space over the course of training for ensembles of models, and find a striking difference in homogeneity of information depending on the dataset.\nFinally, we leverage the differentiability of the proposed method and perform ensemble learning with VAEs by boosting the information content of a set of weak learners incapable of representing the global structure of a dataset." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Kieran_A._Murphy1", "~Sam_Dillavou1", "~Danielle_Bassett1" ] }, "authors": { "value": [ "Kieran A. 
Murphy", "Sam Dillavou", "Danielle Bassett" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Information theory", "representation learning", "disentanglement" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "murphy|quantifying_the_similarity_of_information_contained_in_probabilistic_latent_spaces" }, "pdf": null, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Quantifying the similarity of information contained in probabilistic latent spaces" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0K1OaL6XuK
Planning Anything with Rigor: General-Purpose Zero-Shot Planning with LLM-based Formalized Programming
main
Active
LLM Planning;Code generation;LLM Tool-Use
foundation or frontier models, including LLMs
1;6;6;6
4;4;3;3
2;3;3;3
1;2;3;3
2;3;3;3
4.75
3.5
2.75
2.25
2.75
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. As the authors noted, LLMs have been used to translate natural language to planning problems. Similarly, the mapping from planning to SMT is well known in the planning literature. So, is the novelty is limited to combining the two ideas together??\n2. In page 3, just above the first paragraph, you seem to say that encoding to PDDL requires more human effort than encoding to SMT. Can you elaborate why?\n3. How do you encode the length of the plan? When compiling planning to SAT or SMT, this is an issue because the solver (SAT/SMT) requires to set an upper bound while in PDDL it does not have too." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Strength\n- The paper is in general clear (even if it is sometimes hand-wavey)\n- The problem is interesting and the related work seems to cover all bases\n- The results are impressive, and much better than the baselines. \n- The proposed workflow makes sense and works well." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the problem of solving planning problems that are given in natural language. 
The proposed algorithm they propose – LLMFP - is a workflow of multiple LLMs, including an LLM to extract variables and constraints from the text, an LLM to formulate the extracted variables and constraints as an SMT problem in a specific format, an LLM to convert this format to code that can be run by an SMT solver, and an LLM to verify and correct mistakes by the other LLMs. This LLM workflow is evaluated against the other LLM-based methods to solve planning problems, including one that is similar to the LLMFP but creates PDDL instead of an SMT problem. The authors also examine how results can be better by adding some task-specific expertise. The results over a set of benchmark problems show that LLMFP is, in general, much better than the baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I’m not sure if the novelty of the proposed work over the PDDL-based approach is sufficiently novel for a top conference. \n- The appendix is huge (~40 pages!). This seems to me not reasonable, as the main paper should be self contained. \n- The presentation is too much hand-wavy. It would be great to try to capture more of it in a more formal manner" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What is the definition of a planning problem in this paper?\n2. 
Why are the baselines only LLMs when the proposed approach is a framework/architecture? LLM-PFC [1] approaches planning problems similarly and there are other baselines to consider like Plan-SOFAI [2].\n3. LLMs when used with API's are found to hallucinate new API functions or overuse a specific API call. Is such behavior observed here?\n4. When it is a planning problem, why not directly use a symbolic planner and why is this architecture beneficial?" }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "LLMFP's ability to handle a wide variety of planning problems without task-specific examples is a significant strength." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "LLMFP is proposed which leverages LLMs to tackle complex planning problems by formulating them as optimization tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The baselines for comparison do not seem to be a fair comparison to LLMFP. See questions.\n2. The related work does not cover relevant set of papers that should have been used a baseline to compare this work. Mentioning a few of them below - \n[1] Webb, T., Mondal, S. S., Wang, C., Krabach, B., & Momennejad, I. (2023). A Prefrontal Cortex-inspired Architecture for Planning in Large Language Models. arXiv preprint arXiv:2310.00194.\n[2] Fabiano, F., Pallagani, V., Ganapini, M. B., Horesh, L., Loreggia, A., Murugesan, K., ... & Srivastava, B. (2023, December). Plan-SOFAI: A Neuro-Symbolic Planning Architecture. In Neuro-Symbolic Learning and Reasoning in the era of Large Language Models.\n[3] Katz, M., Kokel, H., Srinivas, K., & Sohrabi, S. (2024). Thought of Search: Planning with Language Models Through The Lens of Efficiency. 
In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "I have no concerns." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. What are the most common failure modes of the baselines?\n\n2. Are the baselines prompted vanilla models plus the components described in lines 410-416, or do they also include other components, e.g. formatter?\n\n3. What are the success rates of the tested methods? Do they all achieve 100% and the question is only whether the solution is optimal, or do some methods fail to solve some instances at all?\n\n4. What's the wall time of LLMFP compared to the baselines?\n\n5. Are the methods explicitly instructed to provide optimal solutions?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "I like the general idea and the presented approach. One could argue that it is simply a combination of prompt engineering and the incorporation of external tools. However, showing an effective way of doing this can be a significant contribution.\n\nThe baselines and ablations are well-chosen for evaluating the performance of LLMFP.\n\nThe paper is written very clearly, making it easy to read. 
The figures are well-chosen (particularly Figure 1), they are helpful in understanding the pipeline. I like the section structure and the focus on key takeaways when discussing experimental results. Most of my questions that arose while reading the text were addressed in later sections." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a framework that pairs LLMs with optimization tools to solve planning tasks without using task-specific knowledge. The authors define consecutive stages of reasoning that, generally speaking, consist of understanding, coding, and refining. For each stage, they discuss the prompting, formatting, and other relevant decisions. Through experimental validation, they show that LLMFP outperforms baselines in 9 domains." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The goal stated in the introduction is \"Can we build a universal LLM-based planning system that can solve complex planning problems without task-specific efforts?\". However, my main concern is whether the tasks used for experiments are indeed complex planning problems. Specifically, the 5 multi-constraint problems resemble simply optimization problems rather than planning problems. Hence it's quite clear that adding an external optimizer to LLM would be much better than just using LLM. On the other hand, the multi-step problems seem to be rather simple and the main difficulty is to understand what we have to do rather than finding a good solution. Hence, I suggest adding at least one multi-step domain with high underlying complexity (e.g. Sokoban). If I missed something and some of your environments are actually NP-hard (or hard in any other reasonable sense), it should be remarked in the paper.\n\nSince the method you propose is clearly subject to a tradeoff between performance and computation time, there should be a discussion of that. 
What's the wall time of LLMFP compared to the baselines? What's the cost of using SMT compared to querying LLM?\n\nThe description of baselines should be extended a bit. Are they prompted vanilla models plus the components described in lines 410-416, or do they also include other components, e.g. formatter? Also, the Code variant uses pure Python, but for a completely fair comparison you should also add variant which is forced to use SMT like LLMFP does. After reading the prompts used, it's also not clear to me whether they are explicitly instructed to provide optimal solutions, which is captured by the metrics. Also, I suggest discussing the failure modes of the baselines (in the Appendix)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How LLMFP handles generalization across different planning tasks. Please correct me, but it seems that we need a very Elaborate prompt with a high level of detail for each task. \n2. In section 3.4 (code generator), readers can Benefit from prior work such as \"CAPE: Corrective Actions from Precondition Errors using Large Language Models\" and \"CoT-TL: Temporal Knowledge Representation of Natural Language Planning Task for Autonomous Agents using Chain-Of-Thought.\" Or is LLMFP doing differently compared to the above works; if yes, explain or not, and make sure to provide proper background." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. LLMFP introduces a new perspective on using LLMs for formal optimization-based planning, a method that significantly expands the generalizability of planning tasks.\n2. Experimental results are solid, with clear evidence of performance gains across diverse tasks and models. The ablation studies reinforce the utility of the framework components, which I really liked.\n3. The ability to solve multi-step, multi-constraint problems without task-specific examples or extensive prior efforts is a major step forward in the area of LLM-based planning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper provides the LLM prompting-based framework for planning tasks. The main contribution lies in its use of prompt and pipeline templates, which can be used across various planning tasks. Here, planning problems from various domains were considered, and planning is treated as an optimization problem. In summary, LLMs are used as an optimizer. They used the formal planner to achieve the goal of planning as already shown in the previous works that LLM still lacks the coherent reasoning needed for planning. The main contribution is an end framework that deploys a zero-shot learning approach for both single and multi-stage planning tasks. Additionally, the author claims that their framework can handle self-critique to assess the problem in planning code to change and achieve the goal. Effectiveness of framework components is supported via the ablations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Complexity in FORMULATOR: Some parts, particularly the JSON representation and the code generation steps, could be simplified. 
While important, the handling of different variable types and constraints might be a bit dense for readers unfamiliar with optimization theory.\n2. Regrading Multi-Step Planning Problem: The predicate, object, and update structure are not clear in multi-step planning. Also image shown for this is not utilized in conveying the idea. Overall, Figure. 2 examples are not clear and make things confusing. \n3. The author claims that their framework is a \"general approach, which does not require task-specific examples or task-specific efforts\"; however, in the paper, this statement is not supported in terms of explanations and prompt structure.\n4. Some theoretical insights regarding performance would make this work more strong, right now its presented more like a experimental results." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a general-purpose planning framework with zero-shot generalization capability, which enable LLMs to build and solve diverse types of planning problems as optimization problems with no task-specific examples or external critics." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024planning,\ntitle={Planning Anything with Rigor: General-Purpose Zero-Shot Planning with {LLM}-based Formalized Programming},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0K1OaL6XuK},\nnote={under review}\n}" }, "abstract": { "value": "While large language models (LLMs) have recently demonstrated strong potential in solving planning problems, there is a trade-off between flexibility and complexity. LLMs, as zero-shot planners themselves, are still not capable of directly generating valid plans for complex planning problems such as multi-constraint or long-horizon tasks. 
On the other hand, many frameworks aiming to solve complex planning problems often rely on task-specific preparatory efforts, such as task-specific in-context examples and pre-defined critics/verifiers, which limits their cross-task generalization capability. In this paper, we tackle these challenges by observing that the core of many planning problems lies in optimization problems: searching for the optimal solution (best plan) with goals subject to constraints (preconditions and effects of decisions). With LLMs' commonsense, reasoning, and programming capabilities, this opens up the possibilities of a universal LLM-based approach to planning problems. Inspired by this observation, we propose LLMFP, a general-purpose framework that leverages LLMs to capture key information from planning problems and formally formulate and solve them as optimization problems from scratch, with no task-specific examples needed. We apply LLMFP to 9 planning problems, ranging from multi-constraint decision making to multi-step planning problems, and demonstrate that LLMFP achieves on average 83.7\\% and 86.8\\% optimal rate across 9 tasks for GPT-4o and Claude 3.5 Sonnet, significantly outperforming the best baseline (direct planning with OpenAI o1-preview) with 37.6\\% and 40.7\\% improvements. We also validate components of LLMFP with ablation experiments and analyzed the underlying success and failure reasons." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "LLM Planning", "Code generation", "LLM Tool-Use" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6a06f93a10e487588f3c373bce647cf34791b8e7.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/03ac4a0e5788181f996298505462086c9a7d4433.zip" }, "title": { "value": "Planning Anything with Rigor: General-Purpose Zero-Shot Planning with LLM-based Formalized Programming" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0KFwhDqTQ6
PSHead: 3D Head Reconstruction from a Single Image with Diffusion Prior and Self-Enhancement
main
Withdraw
Diffusion models;Text to 3D;Image to 3D;3D Avatar
generative models
Jing Yang;Tianhao Walter Wu;Kyle Thomas Fogarty;Fangcheng Zhong;Cengiz Oztireli
~Jing_Yang7;~Tianhao_Walter_Wu1;~Kyle_Thomas_Fogarty1;~Fangcheng_Zhong1;~Cengiz_Oztireli1
3;3;5;5
5;5;4;4
3;2;3;2
2;1;2;2
3;3;3;2
4
4.5
2.5
1.75
2.75
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Can the generated head Gaussian model be driven? If so, please illustrate some novel pose synthesis results.\n2. Missing some references:\n [1] AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars;\n [2] DreamHuman: Animatable 3D Avatars from Text;\n [3] TADA! 
Text to Animatable Digital Avatars;\n [4] ZHOU Z., MA F., FAN H., YANG Y. Headstudio: Text to animatable head avatars with 3d gaussian splatting.\n3. In the ablation study, as Fig. 4 and Tab. 3 show, self-enhancement plays an essential role in generating quality outputs. Does this mean that you do not require the all of diffusion model priors, but that relying on a single diffusion prior, such as T2I combined with self-enhancement, is sufficient? Please provide additional ablation studies, such as T2I + self-enhancement, I2I + self-enhancement, and T2V + self-enhancement. I want to be certain that it is necessary to employ the all of diffusion model prior to distilling the initial head Gaussian model." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper successfully demonstrates the effectiveness of integrating T2I, I2I, and T2V diffusion models into a single framework for generating 3D avatars, showing good performance.\n2. The paper is well-written and easy to follow. \n3. The experimental results demonstrate better performance than the baselines in single-view reconstruction." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a new approach called PSHEAD for generating high-quality 3D avatars from a single image. The key contribution of this research is the utilization of a mixture of diffusion priors to create a coarse representation of the input face, which is then refined through the integration of 2D face priors. Experiments demonstrate promising results, outperforming several baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. My main concern lies in the technical contributions of this paper. 
The authors combine multiple models, such as T2I, I2I, and T2V, to achieve state-of-the-art results. They should provide more insights regarding the use of these models in the paper.\n2. The author should explain why the I2V model was not used and include an ablation study for the I2V model.\n3. The optimization-based method takes a long time to create a human head Gaussian model, requiring approximately 1.5 hours on a single NVIDIA A100 (80GB) GPU, which makes it difficult to use in practical applications." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- As depicted in **Figure 10**, is PSHead capable of effectively managing tasks involving the reconstruction of the head, upper body, and full body?\n- Does PSHead exhibit any racial inductive biases?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "**1.** It includes a comprehensive review of related works.\n\n**2.** The work effectively integrates existing modules and validates the efficacy of critical design components. Furthermore, it addresses a significant problem in the field of 3D Head Reconstruction." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "- This paper leverages the human face priors (e.g., Face landmarks and Face ID) and numerous 2D diffusion models via SDS to establish a coarse-to-fine pipeline for generating 3D avatars from a single image.\n- The proposed method consistently surpasses existing techniques (Magic123, DreamGaussian, Era3D, and PanoHead) on PointAvatar, CelebA, and a private dataset, achieving superior quantitative and qualitative results.\n- Detailed results and corresponding code are included in the supplements.\n\nHowever, the technical novelty is limited, as it primarily uses existing modules, and the empirical approaches for generating 3D Head Avatars from single images are typical." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**1.** The work presents incremental methods, mainly refining Head Reconstruction with predictable improvements and relying extensively on off-the-shelf modules such as 2D pre-trained diffusion models, face landmark detection, and ID recognition model (Arcface) for loss function. Specifically:\n - Coarse stage: Employs DreamBooth for personalized T2I diffusion to produce a preliminary 3D-GS.\n - Fine stage: Utilizes personalized T2I diffusion, landmark-guided ControlNet, and a pre-trained face refinement model (CodeFormer) .\n\nThe authors should discuss the design intuition rather than empirically constructing an engineering pipeline.\n\n**2.** The complexity of the engineering pipeline, detailed in **Figure 2** and **Section 3**, makes the work hard to follow and may hinder further exploration and industrial applications. \n\nThe authors should reduce the number of modules, focusing on core modules as the main claim.\n\n**3.** PSHead lacks the capability to drive expressions. 
\n\nUnlike previous works such as HeadGAP and Morphable Diffusion, PSHead does not support expression-driven animation, limiting its applicability to various downstream applications.\n\n**4.** The paper omits crucial information about model parameters and reconstruction times compared to cutting-edge 3D generation works (e.g., in **Tables 2** and **Table 3**).\n\n**5.** The per-instance optimization process takes approximately 1.5 hours (refer to Implementations), indicating high computational demands.\n\n\nI would appreciate it if the authors could address my concerns by providing corresponding quantitative or qualitative results based on the **weaknesses** and **review feedback**." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Questions: I don't have too much question for this paper.\n2. Suggestions: It is recommended that the authors focus on improving the quality of the side view and back view in order to achieve better results. Additionally, they should validate the effectiveness of using mixed SDS loss by comparing it with one or two SDS loss that can potentially achieve similar performance when combined with the refined stage. Furthermore, conducting evaluation in pixel space for novel views would provide more comprehensive results." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The proposed method demonstrates impressive results in generating high-fidelity photorealistic 3D avatars from a single-face image. The use of a 360◦ 3D-GS representation allows for capturing detailed facial features." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the problem of creating high-fidelity photorealistic 3D avatars from only a single face image. They propose a method that learns a 360◦ 3D-GS representation for a reference image with varying face sizes, leveraging a mixture of diffusion priors to generate a coarse representation and refine the coarse representation in an innovative way by introducing 2D face priors." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper claims to have achieved great 360 free view rendering. However, upon examining the visual results in the paper, it can be observed that the side view and back view contain excessive noise and are significantly blurrier than the front view. In comparison, it does not appear to be better than PanoHead.\n\n2. Many techniques employed in this paper have been used in other papers with similar goals, but they don't address the limitations of these techniques. For example, in the refinement stage, it is unclear how the multi-view inconsistency of refined novel views is handled.\n\n3. Mixed SDS. This paper utilizes three types of SDS loss. However, in Figure 4, it seems that T2V SDS only provides marginal enhancements compared to I2I SDS. Although improvements are shown in Table 3, it is not demonstrated whether T2V still performs well when the refinement stage is followed by only T2I + I2I.\n\n4. 
The method section indicates that the geometry is primarily based on the SDS loss. While personalized diffusion models are mentioned, it remains unclear whether the geometry captures intrinsic details and performs better than generic SDS methods.\n\n5. The paper reports better numerical results for novel views compared to the comparison methods. However, it is worth noting that most metrics for evaluating novel views are done in feature space rather than pixel space(such as psnr). This could explain why the novel views generated by this method appear blurry, but still achieve higher scores than the baselines.\n\n6. The preservation of identity in the rendered avatars from novel views appears to be weak, as observed in Figure 3. In column 4, there is a noticeable change in identity." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No need ethic review." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Although the multi-modality (text, image, and video) SDS work, I am not that confident of the motivation. Is it not enough with a single image-to-video model? An analysis or an ablation is necessary. \n- The GS representation usually results in the degradation of geometry. I hope for more geometry comparisons with Ponahead." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The proposed PSHead follows the pipeline of DreamGaussian, which also includes SDS-based initialization and image-based refinement. The author(s) add several well-designed components, such as DreamBooth, T2V-SDS, and Landmark ControlNet, improving the head generation quality compared to the baseline.\n2. The paper includes comprehensive experiments to evaluate the effectiveness of each design." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces an approach to head generation from a single image. The generation process consists of two stages: multiple pretrained models mixed SDS initialization and head-specific refinement. The framework results in realistic \n$360^{\\circ}$ head rendering." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- In the introduction, the author emphasizes that \"the normalization preprocessing steps of existing methods struggle in handling cases with varying scales.\" However, the results of PanoHead in Fig. 3 (also with shoulders) do not seem that bad. If this is a main motivation, I suggest conducting more comparisons to support it in the main paper rather than only showing a few cases in the appendix.\n- The results in Fig.2 are not satisfactory, with apparent appearance and shape consistency. The $360^\\circ$ videos in supplementary also show severe blur in novel views, especially in back views. In comparison, the results of Panohead are more realistic. 
It will be better to provide a more detailed analysis of these issues, including potential causes and ideas for improvement.\n- The added SDS strategy and refinement components lead to severe efficiency degradation, nearly 1.5 hours as reported in implementations. I think the authors should conduct a comparison of runtime vs. quality metrics analysis of the trade-offs between quality improvements and computational cost.\n- In ablations (section 5), the observation of the gaze direction is intuitive. However, it seems that there are no similar issues in Fig.3." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a method that learns a 3D model from a single face image with diffusion prior and face prior." }, "_bibtex": { "value": "@misc{\nyang2024pshead,\ntitle={{PSH}ead: 3D Head Reconstruction from a Single Image with Diffusion Prior and Self-Enhancement},\nauthor={Jing Yang and Tianhao Walter Wu and Kyle Thomas Fogarty and Fangcheng Zhong and Cengiz Oztireli},\nyear={2024},\nurl={https://openreview.net/forum?id=0KFwhDqTQ6}\n}" }, "abstract": { "value": "In this work, we investigate the problem of creating high-fidelity photorealistic 3D avatars from only a single face image. This task is inherently challenging due to the limited 3D cues and ambiguities present in a single viewpoint, further complicated by the intricate details of the human face (e.g., wrinkles, facial hair). To address these challenges, we introduce PSHead, a coarse-to-fine framework that optimizes 3D Gaussian Splatting for a single image, guided by a mixture of object and face prior to generate high-quality 3D avatars while preserving faithfulness to the original image. At the coarse stage, we leverage diffusion models trained on general objects to predict coarse representation by applying score distillation sampling losses at novel views. 
This marks the first attempt to integrate text-to-image, image-to-image, and text-to-video diffusion priors, ensuring consistency across multiple views and robustness to variations in face size. In the fine stage, we utilize pretrained face generation models to denoise the rendered noisy images, and use them as supervision to refine the 3D representation. Our method outperforms existing approaches on in-the-wild images, proving its robustness and ability to capture intricate details without the need for extensive 3D supervision." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Jing_Yang7", "~Tianhao_Walter_Wu1", "~Kyle_Thomas_Fogarty1", "~Fangcheng_Zhong1", "~Cengiz_Oztireli1" ] }, "authors": { "value": [ "Jing Yang", "Tianhao Walter Wu", "Kyle Thomas Fogarty", "Fangcheng Zhong", "Cengiz Oztireli" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Diffusion models", "Text to 3D", "Image to 3D", "3D Avatar" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": { "value": "yang|pshead_3d_head_reconstruction_from_a_single_image_with_diffusion_prior_and_selfenhancement" }, "pdf": { "value": "/pdf/35f67a4275045e01f68da3eb47f66a9ce6f6b912.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/357dae60de5005463a4176e092f4d42811dffc37.zip" }, "title": { "value": "PSHead: 3D Head Reconstruction from a Single Image with Diffusion Prior and Self-Enhancement" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0KHW6yXdiZ
An End-to-End Model For Logits Based Large Language Models Watermarking
main
Active
LLM watermarking;End-to-end optimization;Robustness
foundation or frontier models, including LLMs
3;5;5;5
5;4;3;5
2;3;2;3
2;3;2;3
3;3;3;3
4.5
4.25
2.5
2.5
3
-0.522233
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- I do not understand why the prompt between non-watermarked and watermarked texts needs to differ (footnote 3 on page 9). Why can't the attacker re-use the same prompts when querying non-watermarked texts? \n\n- In Figure 2, I am unclear how the authors calculate the distance $L_{sem}$ between the watermarked and non-watermarked texts $X_{wm}, X_{nwm}$. Since both sequences will differ in the sampled tokens, they will diverge throughout the generation process if sampled for many tokens. Then, calculating this semantic distance will be meaningless as you cannot effectively align $X_{wm}, X_{nwm}$. Also, it appears unreasonable that the averaged similarity over many contexts will be a meaningful measure of the overall similarity between two sequences. I would appreciate the authors elaborating on this point and providing more context\n\n- The description of the online text editing module is a bit confusing to me. Do the authors also use Gumbel-softmax for the online text editor, or do they pass $X_wm$ directly to the decoder $D$ contrary to what is shown in Figure 1? Since the text generation process from the online text editor $N$ is not necessarily differentiable unless you use some trick, end-to-end training from the detector's prediction back to the encoder won't be possible. 
I would appreciate it if the authors could elaborate on this point." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The method works well and allows adapting against paraphrasing attacks during optimization. \n\n- The authors thoroughly evaluate their approach by including experiments on robustness, detectability and impact on runtime during inference. \n\n- The paper is clear in its presentation and presents the proposed ideas well. \n\n- The cross-LLM inference adapter is a great idea, and I have not seen one before for trainable watermarking methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose an end-to-end optimized watermarking method for large language models to enable the detection of AI-generated content. The goal is to enhance the robustness/text quality trade-off of current LLM watermarking methods. The challenge is that many operations, such as generating sequences of text, are not differentiable. The authors overcome this issue by using the well-known Gumbel-Softmax trick to backpropagate through the text-generating process. To enhance robustness, the authors incorporate a paraphrasing model during the optimization method, and they develop cross-LLM adapters to train on one LLM and deploy it to other LLMs. They show robustness against six text modification attacks and improved text quality." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The results from Figure 5 in their current form are not reproducible and lack transparency. I believe it should be a scatter plot that includes the quality degradation, and the authors should state the hyperparameters for each approach used for paraphrasing (e.g., the prompt used for paraphrasing). \n\n- Abdelnabi et al. 
[A] have previously proposed end-to-end watermarking for LLMs. They also use the Gumbel-softmax trick to differentiate through the text generation process. The authors should consider citing this work.\n\n- Figure 7, showing the difference in token distribution for the top 50 tokens, is difficult to interpret. It looks like the distance to the non-watermarked text is quite large (especially compared to KGW). Also, the choice of using 400 non-watermarked/watermarked samples is unclear. I think it would be better to plot detection accuracy against the size of the training dataset. \n\n- It is well known that perplexity is an unreliable metric used to measure text quality [C]. I was surprised that the authors did not include watermarked samples in their Appendix. There is a known problem: training LLMs with Gumbel-softmax is unstable and can lead to poor results for text generation [D]. Could the authors please show watermarked samples and (potentially) include a limitation section on current challenges when using this optimization method? \n\n--------\n[A] Abdelnabi, Sahar, and Mario Fritz. \"Adversarial watermarking transformer: Towards tracing text provenance with data hiding.\" 2021 IEEE Symposium on Security and Privacy (SP). IEEE, 2021.\n\n[C] Wang, Yequan, et al. \"Perplexity from plm is unreliable for evaluating text quality.\" arXiv preprint arXiv:2210.05892 (2022).\n\n[D] Yu, Zhang Ze, et al. \"Fine-tuning Language Models with Generative Adversarial Reward Modelling.\" arXiv preprint arXiv:2305.06176 (2023)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- I wonder how expensive is the training especially the requirement for the GPU memory. If I understand it correctly, the forward looks like: first token logits -> encoder -> sample the first token -> second token logits -> encoder -> sample the second token -> ... So we recursively call the encoder for n times if we generate n tokens. Would the computational graph be huge? Especially you also have to sample some tokens from the online text editor later. I wonder how did you train it exactly.\n- As I mentioned above, the method might suffer OOD issues. Would the encoder/decoder trained on WikiText-103 still be effective for other datasets? \n- Another thing that confuses me is that: where do you show the results for cross-llm inference? I noticed you mentioned: \"To train our end-to-end model, we chose OPT-1.3B as the online LLM for efficiency.\" Does this mean results for llama models are the transfer inference results? Or this is for the online text editor." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper includes an extensive section on experiments, including many state-of-the-art methods and attack scenarios.\n- The results for overall detectability and text quality look promising.\n- The encoder and decoders are small, so although an extra watermark encoder and decoder have been introduced, the generation and detection are very efficient." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors present a novel logit-based watermarking pipeline for text generation. 
Their approach incorporates Gumbel-Softmax for sampling and an online prompting technique for adversarial edits, allowing the encoder and decoder to be trained in an end-to-end fashion. The method achieves state-of-the-art detectability under various attacks while maintaining high-quality generated text." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The result on generation diversity is not great as the proposed method has the lowest diversity among all other methods. Even though this doesn't affect the results on the benchmarks, I think this might be a bad feature for certain tasks, like synthetic data generation.\n- The proposed method is training-based not like some of the baselines. The method might suffer OOD issues that the distribution of the prompt at the inference time is quite different from the training.\n- The proposed method used a classifier for detection, and this does not give us an interpretable result like a p-value. This might also be bad if we want to control the false positive rate during detection." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "### **Comparative Analysis**\n\n* The paper briefly mentions other training-based methods (UPV, SIR) but lacks detailed comparison\n* Please provide in-depth analysis of architectural differences and performance variations between this work and existing training-based approaches\n\n### **Efficiency and Generalization**\n* The cross-model inference time overhead is significant - what optimizations are possible?\n* How does the method handle LLMs not included in the cross-model converter?\n* What is the failure mode analysis?\n\n### **Evaluation Scope**\n* Evaluation should include more recent LLMs (e.g., Yi, Qwen)\n* Need broader testing across model architectures and scales\n\n### **Related Work and Claims**\n\n* Notable omission of generation-based watermarking methods (e.g., AWT [1], REMARK-LLM [2])\n* The \"first end-to-end framework\" claim requires more careful qualification\n\n[1] Adversarial watermarking transformer: Towards tracing text provenance with data hiding\n\n[2] REMARK-LLM: A robust and efficient watermarking framework for generative large language models\n\n### **Security Analysis**\n\n* How does the method perform against adaptive attacks where adversaries have full access to the system?\n* Need evaluation of undetectability and robustness when attackers can obtain paired watermarked/unwatermarked samples\n\n### **Architecture Choices**\n* Please justify the selection of LSTM as the decoder backbone\n* What alternatives were considered and why were they rejected?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* A distortion module is helpful to enhance the robustness." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present an end-to-end training-based text watermarking method aimed at achieving an optimal trade-off between text quality and robustness, leveraging the logits-based watermarking framework introduced by Kirchenbauer et al. Specially, they jointly train additional encoder to generate logits perturbation to shift the tokens’ probability distribution and additional decoder to extract the watermarking signals from the text. In addition, the authors introduce distortion module, address the non-differentiable operations in the end-to-end training pipeline, and consider the generalization to different LLMs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Insufficient coverage of relevant related work\n* Inadequate explanation of key methodological design choices\n* Evaluation on outdated LLM architectures\n* Limited adaptive attack evaluation\n* Unclear figure captions (specifically Fig. 2)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "N/A" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The proposed end-to-end method is original, and extensive experiments have been conducted to evaluate its quality, detectability, and robustness.\n\n2. The presentation is well-structured, making the paper easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposed an end-to-end optimization framework for achieving better trade-off between the robustness and the text quality. The authors validate the effectiveness of the proposed framework with comprehensive experiments on popular LLMs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Unclear motivation. The authors claimed “However, these existing approaches still fail to achieve an optimal trade-off between text quality and robustness”. However, the authors have missed an important line of works regarding the distortion-free watermark (Kuditipudi et al., 2024; Christ et al., 2024), which suggested we can embed watermarks into LLMs without affect the generation quality. Thus, the there is generally no trade-off between the text quality and robustness, and the claim in the paper is wrong.\n\n2. Limited contribution. Comparing to the previous works (Liu et al., 2024b; Huo et al., 2024), which also share an encoder-decoder structure for logits-based watermarking, the proposed method only introduce a jointly training network for achieving better trade-off between text quality and robustness. 
As the reviewer has pointed out in weaknesses 1, the trade-off generally does not exist. Thus, the contributions of the proposed method are unclear.\n\n3. The experimental results also cannot support the motivation of “achieving better trade-off between text quality and robustness”. In Figure 7. The KGW watermark has significantly better quality than the proposed watermark, although the detectability and the robustness of KGW are poor. In order to claiming the proposed method has achieved better trade-off than KGW, the authors should show the superior of the proposed method on all quality, detectability, and robustness axis. Besides, in Figure 5, we can also see that the proposed method does not always outperform the baselines in all scenarios." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce the first logits-based end-to-end model for LLM watermarking, where encoder and decoder networks are jointly optimized to improve detection robustness and text quality." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024an,\ntitle={An End-to-End Model For Logits Based Large Language Models Watermarking},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0KHW6yXdiZ},\nnote={under review}\n}" }, "abstract": { "value": "The rise of large language models (LLMs) has increased concerns over source tracing and copyright protection for AI-generated content (AIGC), highlighting the need for advanced detection technologies. Passive detection methods usually face high false positives, while active watermarking techniques using logits or sampling manipulation offer more effective protection. Existing LLM watermarking methods, though effective on unaltered content, suffer significant performance drops when the text is modified and could introduce biases that degrade LLM performance in downstream tasks. 
These methods fail to achieve an optimal tradeoff between text quality and robustness, particularly due to the lack of end-to-end optimization of the encoder and decoder. In this paper, we introduce the first end-to-end logits perturbation method for watermarking LLM-generated text. By jointly optimizing the encoder and decoder, our approach achieves a better balance between quality and robustness. To address non-differentiable operations in the end-to-end training pipeline, we introduce an online prompting technique that leverages the on-the-fly LLM as a differentiable surrogate. Our method demonstrates superior detection robustness, consistently outperforming state-of-the-art (SOTA) methods by 1.2\\%, 4.0\\%, and 5.5\\% across 3 LLMs, averaged over 6 types of text distortions. Simultaneously, our approach achieves exceptional text quality, as evidenced by reduced text perplexity and improved performance in the downstream tasks with a margin of 19.2\\% and 3.03\\%. Our method can be easily generalized to different LLMs. The code is available in supplementary material." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "LLM watermarking", "End-to-end optimization", "Robustness" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/a90b055a250f1c4ff5207fd448a066fb03bff3eb.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/ded5fb8332aa1cb71982ec01a79c87e0fde7dd55.zip" }, "title": { "value": "An End-to-End Model For Logits Based Large Language Models Watermarking" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0L8wZ9WRah
Attention-aware Post-training Quantization without Backpropagation
main
Active
Quantization;Hyper-scale LLMs;Attention;Hessian
other topics in machine learning (i.e., none of the above)
3;3;3;5
5;3;5;3
3;2;2;3
2;2;2;2
2;3;1;3
3.5
4
2.5
2
2.25
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Refer to 2 in weakness. What is the effectiveness of proposed approaches in terms of efficiency?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The proposed BOA consider inter-layer dependencies within the attention module when optimize a weight-rounding mechanism. It is beneficial to maintain higher quantization accuracy, especially at low-bit precision.\n\n2. The proposed BOA method demonstrates impressive results, especially in the low-bit regime (e.g., INT2 quantization).\n\n3. The paper includes extensive experiments across multiple model types and sizes, demonstrating scalability across LLMs of different parameter counts." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a novel post-training quantization (PTQ) method, termed BOA (Backpropagation-free Optimization for Attention-aware PTQ), targeting large language models (LLMs) without relying on backpropagation. The approach introduces attention-aware Hessian matrices that capture inter-layer dependencies within the attention module, aiming to improve quantization accuracy, especially at low bit-widths (e.g., INT2). 
BOA incorporates techniques like Hessian relaxation and efficient computation of inverse Hessians to mitigate the high computational costs. The method is benchmarked against existing PTQ approaches on LLMs, demonstrating improved performance in terms of perplexity and zero-shot task accuracy." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Novelty Limitations: The primary contribution, the attention-aware Hessian matrix, is an incremental improvement over existing Hessian-based PTQ methods. While capturing inter-layer dependencies within the attention module is beneficial, the idea is not a novel quantization paradigm. \n\n2. The authors introduce optimizations approaches like Hessian relaxation and efficient computation of inverse Hessians, but the results did not show the effect of these optimization methods." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "The advantage over existing methods is not substantial, suggesting the need for further validation, such as, SmoothQuant, LLMC,QuIP etc." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The topic of this paper is of significant importance and represents one of the most active and rapidly evolving research areas in the field. As LLMs grow increasingly complex, their deployment on resource-constrained devices requires innovative solutions to reduce computational and memory demands. Quantization, as a compression technique, has gained considerable traction for enabling efficient deployment of LLMs without sacrificing model accuracy." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduced the BOA post-training quantization algorithm designed for LLMs that overcomes the limitations of traditional quantization methods, which struggle with inter-layer dependencies and backpropagation requirements in LLMs. BOA leveraged attention-aware Hessian matrices to better capture inter-layer interactions within the attention module, enhancing performance, especially at low bit-widths. The algorithm employed Hessian relaxation and head-wise simultaneous quantization, to attempt to reduce computational and memory costs, making it feasible for quantizing LLMs without backpropagation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The technical approach of this paper is relatively straightforward, lacking intricate or highly novel methodologies. Additionally, certain English terminology within the paper is used imprecisely, which may affect clarity and readability. The comparison methods are somewhat limited, providing a narrow benchmark for evaluating the proposed technique. 
Moreover, while the experimental results demonstrate some improvements, the advantage over existing methods is not substantial, suggesting the need for further validation, such as, SmoothQuant, LLMC,QuIP etc." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.How does the performance of BOA compare when tested on more advanced models, such as the LLaMA3 series, instead of the relatively outdated models used in the paper?\n2.How does BOA's accuracy compare to more recent quantization methods, such as QuaRot and SpinQuant?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tThe paper introduces an innovative PTQ method that cleverly captures inter-layer dependencies within attention modules through attention-aware Hessian matrices while avoiding backpropagation overhead. \n2.\tBOA is compatible with other techniques, such as SmoothQuant and Z-FOLD, enabling further improvements in quantization accuracy by integrating different quantization strategies." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a post-training quantization method called BOA that incorporates inter-layer dependencies without relying on backpropagation. 
BOA leverages attention-aware Hessian matrices to capture dependencies within the attention module, a relatively rare approach in existing PTQ methods. Additionally, BOA demonstrates compatibility with techniques like SmoothQuant and Z-FOLD, allowing for further enhancements in quantization performance. However, despite these strengths, BOA does not show sufficient memory and processing time benefits compared to existing PTQ methods. The experiments are conducted on outdated models, and the comparison methods lack recent advancements. Adding more experiments with up-to-date models and techniques would strengthen the paper." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThe experiments are primarily conducted on BLOOM, LLaMA1, and OPT models, which are somewhat outdated compared to current state-of-the-art models. The paper lacks validation on more recent models, such as the LLaMA3 series.\n2.\tAlthough the paper introduces various techniques to reduce computational overhead and claims to use a Hessian-based strategy to avoid time-consuming gradient-based optimization, as shown in Table 13, BOA’s actual overhead in terms of memory and processing time is greater than GPTQ. Additionally, in Tables 3, 4, and 5, even under 2-bit quantization, BOA's improvement over GPTQ is marginal. For Table 6, it’s worth noting that GPTQ can also integrate certain quantization algorithms, like QuaRot [1] and SpinQuant [2], to achieve better results. Including comparisons with these methods is recommended.\t\n\n[1] Ashkboos S, Mohtashami A, Croci M L, et al. Quarot: Outlier-free 4-bit inference in rotated LLMs. arXiv preprint arXiv:2404.00456, 2024.\n[2] Liu Z, Zhao C, Fedorov I, et al. SpinQuant—LLM quantization with learned rotations. arXiv preprint arXiv:2405.16406, 2024." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer weaknesses for details." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Introducing inter-layer interaction in a training-free manner is innovative.\n2. The paper is well-written." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a training-free post-training quantization method based on GPTQ. It introduces inter-layer interaction by calculating Hessian matrices using an attention module instead of a simple linear module in LLMs. Additionally, the paper proposes techniques to improve the efficiency of Hessian matrix calculations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The experimental setup is somewhat outdated. Additional experiments on newer models, such as LLama-2 and LLama-3, are needed.\n2. Although the paper introduces a training-free PTQ method, it may be slower than training-based methods. For example, Table 2 shows that BOA takes 1 hour to quantize 2.7B models, while GPTQ quantizes larger 13B models in only 21 minutes. OmniQuant, a training-based method, requires only ~1.1 hours for 7B models. 
The paper should provide comprehensive comparisons of quantization times to demonstrate the proposed method's effectiveness.\n3. The paper focuses on 2-bit per-channel quantization and mentions that \"group-wise parameters result in additional memory costs and processing time during inference.\" However, weight-only quantization aims to alleviate memory constraints during the decoding stage. Group-wise quantization introduces negligible overhead but significantly improves performance and is a common practice in existing inference engines. Therefore, the paper should include results for group-wise quantization." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a novel post-training quantization algorithm that considers inter-layer dependencies inside the attention module without relying on backpropagation." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024attentionaware,\ntitle={Attention-aware Post-training Quantization without Backpropagation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0L8wZ9WRah},\nnote={under review}\n}" }, "abstract": { "value": "Quantization offers a promising solution for deploying large-scale language models (LLMs) on resource-constrained devices. However, early quantization methods, developed for smaller networks like ResNet, rely on gradient-based optimization, which becomes impractical for hyper-scale LLMs with billions of parameters. While recently proposed backpropagation-free post-training quantization (PTQ) methods alleviate this issue, their performance is limited by a lack of inter-layer dependency consideration. In this paper, we introduce a novel PTQ algorithm that incorporates inter-layer dependencies without relying on backpropagation. The key innovation is the development of attention-aware Hessian matrices that capture inter-layer interactions within the attention module. 
Extensive experiments demonstrate that our approach significantly outperforms conventional PTQ methods, particularly at low bit-widths." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Quantization", "Hyper-scale LLMs", "Attention", "Hessian" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1b6210fb7bc41a5685bb0f16f329e2cea4a831f1.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Attention-aware Post-training Quantization without Backpropagation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0LSAmFCc4p
Brain-inspired -Convolution benefits large kernels and aligns better with visual cortex
main
Active
Lp-Convolution;Receptive Field;Multivariate p-generalized normal distribution;Representation Similarity;Visual Cortex;Gaussian Sparsity
applications to computer vision, audio, language, and other modalities
3;5;8;8
4;4;4;3
3;2;3;3
1;2;3;3
3;3;4;4
6
3.75
2.75
2.25
3.5
-0.544331
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "None" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "+ Novel inductive bias for convnets motivated by biology\n + Overall fairly well-written paper\n + Well-motivated and well-executed experiments" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a brain-inspired approach of constraining the weights of convnets with a p-generalized Gaussian envelope. The authors demonstrate some minor improvements in performance on relatively small image datasets such as CIFAR-100 and TinyImageNet. They further claim that the learned representations of more \"brain-like\" convnets have higher representational similarity to the mouse visual system than their more classical counterparts." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The effect sizes are really small, calling into question the practical impact\n- Only \"toy\" datasets are explored\n- Experiment on representational similarity not convincing\n\n\n### Detailed explanation\n\nWhile I find the paper well motivated and the idea original, I see the paper mostly as a negative result given the small effect sizes observed across most of the tables and the \"toy\" nature of datasets such as CIFAR-100 and TinyImageNet. The paper now has undergone several revisions that only reinforce this conclusion.\n\nThere is a statistically significant improvement due to Lp-Conv for some classical architectures, but not all of them (e.g. ResNet). Generally, the improvements are small (a few percent). Given that architectural modifications alone can now push accuracy on CIFAR-100 >90% (https://arxiv.org/abs/2304.05350v2), the 1–2% improvements in the 60–70% range feel insignificant. Experiments on more modern architectures such as RepLKNet (Table 3) show the same pattern, if anything with decreasing effect size (<1% improvement). Similarly, the transfer learning experiment using ConvNeXt-V2 (Table 4) shows close to no effect. There are no experiments on closer-to-real-world datasets like ImageNet (although that's by now a fairly standard problem that can be done on a consumer GPU), although I should say that I do not expect major effects in that experiment, either. The data simply show that the inductive bias doesn't do much.\n\nThe experiment on representational similarity yields equally small effect sizes, again insignificant on many architectures. In addition, the comparison is done to several mouse visual areas, some of which aren't even part of the \"ventral\" stream for which a convnet trained on image classification would be a reasonable model." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See weakness for suggestions.\n\nPotential typo:\nline 159: a solution to the of large kernel problem in CNN -> a solution of the large ..." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The idea of a $L_p$ convolution is original and could have wide application in visual tasks that require flexible receptive field size or in general tasks that require both local and global information from visual input. Showing its ability to transfer to any pretrained network greatly lowers the threshold to apply this for a wide range of tasks. The choices of Sudoku task and the follow up ablation analysis is solid and demonstrates well the strength of this method. Out of all papers that take inspirations from neuroscience and try to utilize it to improve neural nets, this paper stands out in actually providing a fundamentally different implementation of CNNs.\n\nOther that than the points mentioned above, the benchmark testing was thorough and presented clearly. The visualization of both the $L_p$ mask and convolution is very helpful for understanding the concepts. The writing is very clear." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces $L_p$-Convolution by integrating the multivariate p-generalized normal distribution into a $L_p$ masks for convolution filters. It allows the network to adapt to different receptive field shape and train efficiently with large kernel. The paper show $L_p$-Convolution has a advantage in tasks such as sudoku challenge." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper does show through table 1-4 that $L_p$-CNNs can train with large kernels and have some advantage in robustness as well as accuracy in benchmark test. However the improvements over baseline models are small. I don't think these numbers convinces me how useful the $L_p$-CNNs could be. Aside from the sudoku task, the paper didn't really show the advantage of efficiently trained large kernal $L_p$-CNNs through a task that actually could really benefit from large kernels. I would suggest including some more tasks that requires processing of context, or even tasks ViT excels at for comparison. \n\nFor the robustness benchmark as well as the Sudoku task, it could be informative to include performance of ViTs as well. \n\nLastly, it is pretty well established throughout the paper that $p_{init}=2$ is the most useful for most task, and resembles the most to the biological system. I am not sure it is worth having another section (sec. 6) dedicating to comparing similarity of RSM across different $p$s. If the author were to demonstrate it is also a better model for brain representational alignment that I would recommend doing a more thorough study including more datasets, brain regions and variation of CNNs." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "**(More of a curiosity)** For future developments of this work, it would be interesting to explore connections with anisotropic diffusion (Perona & Malik, *Scale-space and edge detection using anisotropic diffusion*, 1990). In standard convolution, there exists a well-established mapping between convolution operators and isotropic diffusion processes (as explored in Scale-Space theory, particularly in Koenderink, *The structure of images*, 1987; and Lindeberg, *Scale Space theory in computer vision*, 1994). How might Lp-convolution relate to or extend these theoretical frameworks?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is overall well-written, with sections that tell a clear, sequential story. The usage of bold characters to highlight important parts is particularly appreciated. This is a very strong contribution across multiple aspects:\n\n* Connectivity patterns as inductive biases are largely unexplored within this community. The biological inspiration effectively guides the search for plausible connectivity patterns, and the approach proposed in this submission is particularly apt. 
The work presents a complete narrative, from biological mechanism inspiration to the implementation of Lp-convolution for neural activity prediction in V1 and representational similarity analysis.\n\n* The paper's approach to addressing large kernel network training challenges could potentially bridge the performance gap between transformers and CNNs in image classification tasks.\n\n* The mathematical formulation is sound and accessible, with figures (e.g., Figure 1 and 2) that effectively illustrate concepts and build intuition about parameter effects.\n\n* The choice of the Sudoku challenge adds significant value, serving as an excellent demonstration of the model's capabilities in an easily understandable context (especially for $L_p$ mask shapes)\n\n* The Appendix comprehensively addresses potential questions, demonstrating thorough consideration of the work's implications and limitations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Lp-convolution, a novel approach to convolutional neural networks (CNNs) inspired by biological visual processing. The work addresses fundamental differences between artificial and biological visual systems: while traditional CNNs employ rectangular, dense, and uniform connectivity patterns, biological visual systems feature circular, sparse, and normally distributed connections. Additionally, the paper tackles the longstanding challenge that large kernel sizes in CNNs typically don't improve performance despite increased parameters.\n\nThe key innovation is the introduction of Lp-convolution, which uses multivariate p-generalized normal distribution (MPND) to bridge these biological-artificial differences. The method implements trainable \"Lp-masks\" that can adapt their shape through parameters, enabling flexible receptive field shapes that better match biological patterns. 
Technically, this is achieved by applying channel-wise Lp-masks that overlay onto convolutional kernels, with shape parameters that can be trained for task-dependent adaptation.\n\nThe authors demonstrate several significant findings: Lp-convolution improves the performance of CNNs with large kernels, with optimal results achieved when the initial p parameter approaches 2 (matching biological Gaussian distribution). Moreover, neural representations show better alignment with the visual cortex when connectivity patterns are more biologically plausible. \n\nThe practical impact of this work is threefold: it enables effective utilization of larger kernels in CNNs, achieves more biologically plausible artificial neural networks, and maintains compatibility with existing CNN architectures." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I don't think significant weaknesses are present in this work. The paper should be accepted as it is." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* In the authors' claim regarding \"Lp-convolution with biological constraint,\" specifically the \"Gaussian structured sparsity,\" what theoretical and empirical evidence supports this biological constraint? 
\n\n* Across various experiments, since $p$ is a learnable parameter, what typical values does it converge to, and are there any observable trends or variations across different datasets? Could the authors interpret these findings in relation to biological insights?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* Experimental results indicate that CNN neural representations exhibit a stronger alignment with the visual cortex when the Lp-mask shape approximates a Gaussian distribution. \n\n* Testing the conformational adaptability of Lp-masks in the Sudoku challenge yielded interesting results, highlighting the flexibility of this approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper primarily investigates variations in local connectivity patterns within CNNs, examining whether incorporating biologically inspired connectivity structures can improve model performance and increase alignment with brain representations. Specifically, the authors introduce Lp-convolution, which utilizes the multivariate p-generalized normal distribution (MPND). The proposed adaptable Lp-masks aim to bridge the gap between artificial and biological connectivity patterns, finding optimal configurations through task-based adaptation to enable strong performance in tasks requiring flexible receptive field shapes." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Consistency in terminology would improve clarity; alternating between “Lp-convolution” and “Lp-mask” can be confusing. Using a single term throughout would make the concepts easier to follow. 
\n\n* The mention of Vision Transformers (ViTs) in the introduction feels tenuous, as they are not included in subsequent experiments, nor are they closely related to the main theme of the paper. \n\n* In lines 108-110, where it is stated that “CNNs have rectangular, dense, and uniformly distributed connections, as opposed to the circular, sparse, and normally distributed connections in biological neurons,” this description would benefit from supporting references regarding the shapes of receptive fields in biological neurons. It’s also worth questioning whether this statement accurately characterizes CNN weights, as CNNs trained to model retinal ganglion cells, for instance, have demonstrated sparse weight patterns ([1]-[5]). \n\n* Lines 137-138 mention that “we optimized parameters of p and σ in MPND (Fig.1e, Eq.1)...” However, Eq.1 and the text do not define σ. It’s also recommended that the authors confirm Eq.1’s form by referencing the standard expression of a multivariate Gaussian function. \n\n* Integrating Lp-masks in CNNs does not appear to significantly improve recognition accuracy across datasets. Comparing this approach to ViTs, it’s unclear if it achieves current state-of-the-art performance. \n\n* The justification for using large, sparse kernels feels somewhat weak. Aside from achieving marginal improvements in RSM alignment with the visual cortex, it’s unclear how this approach benefits contemporary computer vision tasks. \n\n\nReferences:\n\n[1] Maheswaranathan, Niru, et al. \"Deep learning models reveal internal structure and diverse computations in the retina under natural scenes.\" BioRxiv (2018): 340943. \n\n[2] Tanaka, Hidenori, et al. \"From deep learning to mechanistic understanding in neuroscience: the structure of retinal prediction.\"  Advances in neural information processing systems 32 (2019).\n\n[3] Lindsey, Jack, et al. 
\"A unified theory of early visual representations from retina to cortex through anatomically constrained deep CNNs.\"  arXiv preprint arXiv:1901.00945 (2019).\n\n[4] Yan, Qi, et al. \"Revealing fine structures of the retinal receptive field by deep-learning networks.\"  IEEE transactions on cybernetics 52.1 (2020): 39-50.\n\n[5] Zheng, Yajing, et al. \"Unraveling neural coding of dynamic natural visual scenes via convolutional recurrent neural networks.\" Patterns 2.10 (2021)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024braininspired,\ntitle={Brain-inspired $L_p$-Convolution benefits large kernels and aligns better with visual cortex},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0LSAmFCc4p},\nnote={under review}\n}" }, "abstract": { "value": "Convolutional Neural Networks (CNNs) have profoundly influenced the field of computer vision, drawing significant inspiration from the visual processing mechanisms inherent in the brain. Despite sharing fundamental structural and representational similarities with the biological visual system, differences in local connectivity patterns within CNNs open up an interesting area to explore. In this work, we explore whether integrating biologically observed receptive fields (RFs) can enhance model performance and foster alignment with brain representations. We introduce a novel methodology, termed $L_p$-convolution, which employs the multivariate $L_p$-generalized normal distribution as an adaptable $L_p$-masks, to reconcile disparities between artificial and biological RFs. $L_p$-masks finds the optimal RFs through task-dependent adaptation of conformation such as distortion, scale, and rotation. 
This allows $L_p$-convolution to excel in tasks that require flexible RF shapes, including not only square-shaped regular RFs but also horizontal and vertical ones. Furthermore, we demonstrate that $L_p$-convolution with biological RFs significantly enhances the performance of large kernel CNNs possibly by introducing structured sparsity inspired by $L_p$-generalized normal distribution in convolution. Lastly, we present that neural representations of CNNs align more closely with the visual cortex when $L_p$-convolution is close to biological RFs." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Lp-Convolution", "Receptive Field", "Multivariate p-generalized normal distribution", "Representation Similarity", "Visual Cortex", "Gaussian Sparsity" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b7c2aa857617b53a89dff374ee8c1cf1ee787a1f.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Brain-inspired $L_p$-Convolution benefits large kernels and aligns better with visual cortex" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0Lpz2o6NDE
Tex4D: Zero-shot 4D Scene Texturing with Video Diffusion Models
main
Active
4D texture synthesis;consistent video generation;zero-shot
generative models
3;5;5;5
4;4;3;4
2;2;2;3
2;2;2;2
3;2;3;2
4.5
3.75
2.25
2
2.5
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Although the experiments provide some evidence of the proposed method’s effectiveness, several concerns remain:\n\n1. Could the authors provide additional qualitative and quantitative comparisons in the ablation study? With only one demonstration, it is difficult to convincingly assess the effectiveness of the proposed method.\n\n2. The authors suggest that video diffusion models struggle with multi-view consistent texturing for 3D mesh sequences due to a lack of 3D geometry awareness. However, the approach already uses a depth-aware video diffusion model, which inherently includes some geometric awareness. Why does this straightforward combination not achieve the desired consistency? Does this imply that depth-aware video diffusion models alone cannot guarantee multi-view consistency even with depth information? If so, could the authors provide performance metrics or visual comparisons showing results when using only the depth-conditioned video diffusion model as a prior? Additionally, for a single viewpoint, does the video diffusion model produce temporally consistent results? If not, visual examples would help clarify.\n\n3. Since a mesh input is available, a straightforward approach could be to texture the mesh on the first frame using methods like Text2Tex or SceneTex, then animate the textured mesh. 
This method might improve efficiency and naturally maintain multi-view consistency across frames. How does this alternative approach compare in terms of both methodology and performance? An in-depth discussion of these differences would be beneficial.\n\n4. The authors mention that for each predefined viewpoint, a sequence of K rendered meshes is used as input and individually textured by the depth-guided diffusion model. Could the authors clarify the motivation behind this setup? Since the videos are generated separately for each view, multi-view inconsistencies are expected. Why introduce this setup if it inherently leads to consistency issues at the start?\n\n5. While using UV textures for each mesh can enhance multi-view consistency, this approach seems more like an averaging of multiple viewpoints to produce a smoother result. Can the authors elaborate on how this averaging mechanism ensures true multi-view consistency?\n\n6. Given that the current method requires rendering V views for each mesh in the sequence, which may be computationally intensive, could the authors discuss the efficiency of the method? Details on the time required to process a sample would help assess the method's practicality.\n\n7. It would be beneficial to include video visualizations or comparative examples to further illustrate the method's performance and effectiveness." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "See below." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a novel framework for generating textures for mesh sequences. The authors utilize a depth-conditioned video diffusion model to ensure temporal consistency in videos generated from rendered mesh sequences for each predefined viewpoint. 
To achieve multi-view consistency, they adopt a UV space texture aggregation strategy. Additionally, they propose a modified sampling approach to address the issue of blurriness in the generated textures." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "See below." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In the paper, the authors mentioned that the mesh texture could be significantly influenced by the background, providing an example with a white background. I’m curious how the generated texture might look if a non-white background was used, especially one that contrasts strongly with the foreground object. How would such a background affect the consistency and quality of the generated texture?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors assert that this is the first method developed specifically for 4D scene texturing.\n2. The authors introduce a multi-frame consistent texture generation technique, demonstrating improved consistency in results compared to baseline methods.\n3. The paper is fluent and well-written, contributing to its readability and overall clarity." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposed a 4D scene texturing approach by video diffusion models, a zero-shot pipeline for generating temporally and multi-view consistent textures. In order to aggregate multiview latents in UV space, they discovered the issue of \"variance shift\" caused by their aggregation, and proposed to modify DDIM sampling process to address the issue. By UV blending during denoising steps, the issue of self-occlusion is addressed and synchronized in invisible regions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The generated textures do not blend seamlessly with the background, creating a disjointed appearance that resembles separate foreground and background elements stitched together.\n2. Despite claims of multi-view consistency, flickering effects are observed across different views, indicating instability in rendering.\n3. Some of the compared methods, such as TokenFlow and Text2Video-Zero, do not utilize mesh or depth inputs, making direct comparisons less equitable." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Overall, the experimental results are quite satisfactory; however, there is a lack of explanation regarding the advantages of the pipeline compared to other pipelines using 2D poses and textured meshes.\n\nAnimation can drive the mesh. Are the position and rotation of the mesh manually controlled, such as in the second example on the first page?\n\nHow is the animated mesh obtained? We observe some temporal changes in Figure 5. Is this one of the contributions of your paper? How do you distinguish between temporal changes and temporal inconsistencies, as there are some temporal inconsistencies in your results?\n\nWould using textured meshes yield better outcomes? What are the advantages of generating videos with untextured meshes compared to textured meshes?\n\nWhat is the difference between using a 2D pose and an animated mesh?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper is the first work to perform video generation based on animated mesh sequences, while its UV mapping strategy ensures multi-view consistency. The experimental results show significant advantages compared to some existing works." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces 4D scene texturing to generate textures that are consistent both temporally and across multiple views for animated mesh sequences. Tex4D, uses 3D geometry to synchronize diffusion processes, incorporates video generation model insights for temporal consistency, and modifies the DDIM sampling process to improve texture clarity." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Under the current pipeline, this work has yielded highly effective results. However, the importance of this pipeline should be further clarified, such as by comparing it with pipelines based on 2D poses or textured meshes. The paper should include more comprehensive comparisons to highlight the contribution of the pipeline. For example, is it a reasonable pipeline to first generate textured meshes and then use animated meshes for video generation?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Q: \"While these methods produce multi-view consistent textures for static 3D objects, they do not address the challenge of generating temporally consistent textures for mesh sequences.\" I would appreciate further clarification on the motivation behind \"generating temporally consistent textures\" for mesh sequences. 
Could you provide examples where dynamic 4D texturing is essential and cannot be achieved through traditional methods?\n\nQ: How does the model ensure robustness when dealing with varying UV mapping results?\n- How sensitive is the method to different UV unwrapping techniques?\n- Did the authors experiment with different UV mapping strategies, and if so, what were the results?\n- Are there any limitations or best practices for UV mapping when using this method?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "# Strengths\n\n- The paper is well-written, making it easy to understand and follow.\n- The related works are sufficiently covered.\n- Several experiments are conducted for demonstrating its effectiveness." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "# Summary\n\nThis paper focuses on creating temporally consistent and realistic textures for mesh sequences. The input is an untextured mesh sequence and a text prompt. To achieve this, a method named Tex4D is proposed, which is a zero-shot approach that integrates geometry information from mesh sequences with video diffusion models, specifically the depth-conditioned CTRL-Adapter (Lin et al., 2024), to produce multi-view and temporally consistent 4D textures. The model synchronizes the diffusion process across different views through latent aggregation in the UV space. Additionally, a reference latent texture is introduced to strengthen the correlation between frames during the denoising process." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Regarding the contribution and motivation, I still wonder about the specific meaning of \"4D texturing\" for the object and how this differs from a 3D model that is first textured and then animated using skinning techniques. Even for dynamic textures, one could also generate the dynamic texture for a 3D model and then animate the character through skinning. This approach seems useful **if** the mesh is also significantly dynamic, such as with topology changes. Further, when it comes to the video background, I noticed in the supplementary video that there are some dynamic effects from the proposed method, but they are not that significant. Could one first perform 3D texturing and then render it with a generative background video?\n\nI think the following suggestions could be helpful in justifying the significance of the setting in this paper.\n- Provide specific examples or use cases where 4D texturing offers advantages over traditional 3D texturing and animation.\n- Clarify how the proposed method handles dynamic meshes with topology changes, if applicable.\n- Compare the method with a pipeline of 3D texturing followed by rendering with a generative background video, highlighting any benefits of the presented \"integrated\" approach.\n\nI also have concerns about the novelty of the paper. The entire pipeline can be seen as a depth-conditioned CTRL-Adapter for mesh sequence texturing with UV space aggregation, which feels like a straightforward composition of existing models. I would prefer to see a simple yet effective method of tackling a critical problem. 
However, as I am still uncertain about the meaning/significance of \"4D texture,\" this makes me somewhat skeptical about the proposed pipeline.\n\nI think the authors could provide some arguments for the novelty of the proposed method:\n- Highlight the key technical innovations in the pipeline beyond the composition of existing models.\n- Explain how the introduced method addresses specific challenges in 4D texturing that are not easily solved by existing methods.\n- Provide a clearer definition and motivation for \"4D texturing\" to help readers understand its significance in the context of their work (similar to the previous questions).\n\nI understand the difficulty in evaluating the results, but it would be helpful and necessary to conduct an evaluation of Appearance Quality, Spatio-temporal Consistency, and Consistency with Prompt via a user study - The quantitative evaluation is insufficient.\n\nThe following action can be taken:\n- Conduct a user study evaluating the specific aspects, e.g., Appearance Quality, Spatio-temporal Consistency, and Consistency with Prompt, and compare the proposed method with previous models.\n\nLimitations and Future Works should be included. \n- For instance, the authors may discuss the current limitations of their approach, such as some failure cases or more specifically, the types of meshes or textures it struggles with, etc.\n- Potential future improvements or extensions to the method.\n- Broader implications or applications of this work in related fields.\n\n\n# Minor Comments\n\n- \"To resolve this issue, we analyze the underlying causes and propose a simple yet effective modification to the DDIM (Song et al., 2020) sampling process.\" In the introduction section, it would be beneficial to briefly explain how you achieved this." 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "We present a method that takes an untextured, animated mesh sequence along with a text prompt as inputs, and generates multi-view, temporally consistent 4D textures." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024texd,\ntitle={Tex4D: Zero-shot 4D Scene Texturing with Video Diffusion Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0Lpz2o6NDE},\nnote={under review}\n}" }, "abstract": { "value": "3D meshes are widely used in computer vision and graphics because of their efficiency in animation and minimal memory footprint. They are extensively employed in movies, games, AR, and VR, leading to the creation of a vast number of mesh sequences. However, creating temporally consistent and realistic textures for these mesh sequences remains labor-intensive for professional artists. On the other hand, video diffusion models have demonstrated remarkable capabilities in text-driven video generation, enabling users to create countless video clips based solely on their imagination. Despite their strengths, these models often lack 3D geometry awareness and struggle with achieving multi-view consistent texturing for 3D mesh sequences. In this work, we present Tex4D, a zero-shot approach that integrates inherent 3D geometry knowledge from mesh sequences with the expressiveness of video diffusion models to produce multi-view and temporally consistent 4D textures. Given an untextured mesh sequence and a text prompt as inputs, our method enhances multi-view consistency by synchronizing the diffusion process across different views through latent aggregation in the UV space. To ensure temporal consistency, we leverage prior knowledge from a conditional video generation model for texture synthesis. 
However, straightforwardly combining the video diffusion model and the UV texture aggregation leads to blurry results. We analyze the underlying causes and propose a simple yet effective modification to the DDIM sampling process to address this issue. Additionally, we introduce a reference latent texture to strengthen the correlation between frames during the denoising process. To the best of our knowledge, Tex4D is the first method specifically designed for 4D scene texturing. Extensive experiments demonstrate its superiority in producing multi-view and multi-frame consistent videos based on untextured mesh sequences." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "4D texture synthesis", "consistent video generation", "zero-shot" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/41d6d04e90ec21c6187699bfa5d5e0a4c5b68b32.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/c5225d61a0ea3ecd6d471f9454fc51950ac3d004.zip" }, "title": { "value": "Tex4D: Zero-shot 4D Scene Texturing with Video Diffusion Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0MVWOHwHDb
Retrieval-Augmented Language Model for Knowledge-aware Protein Encoding
main
Active
Knowledge Graphs; Protein Science; Representation Learning
applications to physical sciences (physics, chemistry, biology, etc.)
3;5;6;6
3;5;3;4
2;3;3;3
2;2;3;3
2;3;3;3
5
3.75
2.75
2.5
2.75
0.246183
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I have listed my questions in weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The method effectively carries the external knowledge injected during pretraining into downstream, significantly mitigating catastrophic forgetting. Additionally, the knowledge retrieval process not overly complex.\n2. The proposed relation-GO combinations further enhance retriever’s ability to recall the informative knowledge.\n3. The authors demonstrate the methods’ effectiveness across multiple tasks and conduct thorough ablation studies, such as the effect of without the neighboring information during inference.\n4. The paper is well-written and clear" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Instead of implicitly modeling knowledge information, the paper proposes knowledge-aware retrieval-augmented protein language model (Kara), which enables consistent knowledge-augmented modeling when applied to downstream protein-related tasks. During the pretraining stage, the authors try to model the structural information in the protein knowledge graph, such as neighboring and high-order connectivity. 
In the fine-tuning stage, a knowledge retriever is used to bridge the optimization gap between pretraining and fine-tuning, allowing for seamlessly adapting to knowledge updates.\nThe authors conduct extensive experiments and demonstrate that this unified knowledge modeling process consistently outperforms existing knowledge-enhanced models across six protein-related tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. If the protein belongs to an under-studied or new protein family, does this retrieval method have certain limitations, especially when these proteins have very low sequence identity to known (trained) proteins? It would be better to include experiments on under-studied proteins to demonstrate, possibly by a simulated way that splitting clusters of low-identity proteins into training and validation sets.\n2. Further, does the method have potential to uncover patterns of new proteins and their associations with existing?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Protein downstream tasks often require different kinds of knowledge, e.g. PPI requires knowledge about the functions and relations of the two proteins, contact prediction requires evolutionary and structural knowledge. 
Wonder if the authors could further provide insights on how knowledge & structural information differentiate across tasks. For example, why introducing more graph structural knowledge could improve the performance on contact prediction." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The performance improves on most tasks (following the same experiment tasks and settings as Ontoprotein) compared to Ontoprotein and KeAP.\n\n- The encoding style of Kara combines strengths of Ontoprotein and KeAP: Ontoprotein uses contrastive pretraining to first obtain structure-intensive graph embedding and then inject into language model, while KeAP direct encodes related knowledge in tuples with language encoder. Differently, Kara encodes 1-hop GO entity as knowledge, and 2-hop entities as structure to provide more detailed graph knowledge for the protein language model. \n\n- The knowledge retriever maps new protein sequence to GO entities, which could make it possible to generalize to proteins not directly covered by the knowledge graph." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Kara that uses information from protein knowledge graphs to improve protein language models. Kara directly injects relevant knowledge during both pre-training and fine-tuning phases, utilizing a knowledge retriever that predicts gene descriptions for new proteins. Kara involves introduction of several key components: Contextualized Virtual Tokens that fuse knowledge and structural information into protein representations; Knowledge injection both at post-training and fine-tuning stages; Retrieval of relevant proteins and graph information with a dense retriever." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
My major concern of this work is its technical contributions, which closely follows OntoProtein and KeAP. The main improvement of Kara compares to Ontoprotein and KeAP is that it encodes both structural information (relations in GO) and knowledge (knowledge stored in each triple) within the contextualized virtual tokens. Ontoprotein uses the same pipeline to encode protein knowledge graph and inject embeddings into the language model, so the technical contributions are minor. \n\n2. The structural regularization (Eq. 6) obtained from two-hop entities might be weak or misleading. ProteinKG25 is a sparse knowledge graph and entities involve not only proteins as well as biological processes and molecular functions. What is the percentage of proteins that have 2-hop protein neighbors and are the neighbors all functional similar ? Neighbors may not be similar proteins but could be proteins that could interact with each other. Their function may not be similar." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.How is the ProteinKG25 knowledge graph selected? There are many other well-known protein-related multi-omics knowledge graphs, such as PharmKG (Briefings in bioinformatics, 2021), CKG (Nature biotechnology, 2022). 
Do the types and numbers of entities and relationships affect model performance?\n\n2.The knowledge graph contains only positive samples for interaction-based tasks. Did the authors incorporate negative sampling during training? If so, please provide additional details on how this was implemented.\n\n3.ProteinKG25 is used as the KG, but the model is evaluated on six representative tasks, it is unclear how the entities of these datasets are linked to the knowledge graph.\n\n4.Where is Table 7? If I missing \n\n5.From many experimental results (e.g., Table 3 and Table 6), we can see that KeAP has achieved comparable performance to Kara. Please describe the difference between the two methods in detail, and be curious about the complexity and number of parameters of the two methods.\n\n6.The experimental design is good, however there are one limitations that preclude the reader to understand how generalizable the method is. Only one protein embedding method (ProtBert) is tested for the pre-trained embeddings." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "In general, the paper is clearly expressed and organized. The authors' innovation of direct injecting the protein knowledge graph into large language model to explore the knowledge-aware protein representation learning, which will have some implications for the biomedical field. In addition, the experiments in the discussion section demonstrate that the virtual tokens and structure-based regularization are good at capturing high-order information of protein knowledge graph from a novel perspective." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "How to effectively transfer knowledge from knowledge graphs to large language model is a challenging task. 
In this paper, the authors is the first to propose a novel knowledge retriever, named Kara, that directly injects the correlated knowledge into protein language models and aligns the protein knowledge graph with downstream tasks. Specifically, the contextualized virtual tokens is designed to enable the direct injection of knowledge and high-order structure information into protein representations. Extensive experimental results, arranging from amino acids contact prediction to semantic similarity inference, demonstrate the superior performance of proposed Kara." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The Introduction needs to provide more background information, such as the specific role of Knowledge Graphs (KGs) in this context, the benefits they offer, and the rationale behind exploring KG-based methods." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "see above." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "It uses a novel knowledge retriever to predict gene descriptions for new proteins during both pre-training and fine-tuning stages, which helps in aligning with PKGs and improves knowledge retention.\n\nThese tokens enable token-level integration of knowledge and structural information into protein representations, enhancing the model’s ability to handle high-order knowledge." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces the Kara model, which integrates protein knowledge graphs (PKG) directly into protein language models (PLM) to enhance understanding of biological functions encoded in protein sequences." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The performance of the model heavily relies on the quality and the extent of the PKGs used, which might limit its application if relevant knowledge graphs are incomplete or outdated.\n\nWhile the model shows improvements in task-specific contexts, its ability to generalize across broader protein types or different biological conditions remains uncertain." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a knowledge-aware retrieval-augmented protein language model, achieving the first unified and direct integration of protein knowledge graphs and protein language models. Performance on 6 downstream tasks verify its superiority." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024retrievalaugmented,\ntitle={Retrieval-Augmented Language Model for Knowledge-aware Protein Encoding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0MVWOHwHDb},\nnote={under review}\n}" }, "abstract": { "value": "Protein language models often struggle to capture the biological functions encoded within protein sequences due to their lack of factual knowledge (e.g., gene descriptions of proteins). Existing solutions leverage protein knowledge graphs (PKGs), using knowledge as auxiliary encoding objectives. However, none of them explored the direct injection of correlated knowledge into protein language models, and task-oriented knowledge integration during fine-tuning, making them suffer from insufficient knowledge exploitation and catastrophic forgetting of pre-trained knowledge. The root cause is that they fail to align PKGs with downstream tasks, forcing their knowledge modeling to adapt to the knowledge-isolated nature of these tasks. To tackle these limitations, we propose a novel knowledge retriever that can accurately predict gene descriptions for new proteins in downstream tasks and thus align them with PKGs. On this basis, we propose Knowledge-aware retrieval-augmented protein language model (Kara), achieving the first unified and direct integration of PKGs and protein language models. Using the knowledge retriever, both the pre-training and fine-tuning stages can incorporate knowledge through a unified modeling process, where contextualized virtual tokens enable token-level integration of high-order knowledge. Moreover, structure-based regularization is introduced to inject function similarity into protein representations, and unify the pre-training and fine-tuning optimization objectives. 
Experimental results show that Kara consistently outperforms existing knowledge-enhanced models in 6 representative tasks, achieving on average 5.1% improvements." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Knowledge Graphs; Protein Science; Representation Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/efbc73bf06f64b99f93e08b3ae4738c729593fbd.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Retrieval-Augmented Language Model for Knowledge-aware Protein Encoding" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0MhlzybvAp
Balanced Learning for Domain Adaptive Semantic Segmentation
main
Active
Semantic segmentation
applications to computer vision, audio, language, and other modalities
5;5;6;6
4;4;5;3
2;3;3;3
2;3;3;3
3;4;3;3
5.5
4
2.75
2.75
3.25
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See the weakness section." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper provide a new way to measure the class distribution changes in semantic segmentation by the logits distribution. \n2. The proposed module could easily be applied to existing UDA for semantic segmentation methods, potentially have a broad use in this area.\n3. The proposed module is generally effective on most of the classes in the two benckmark tasks.\n4. The visual aid is good, provide an intuition of the motivation, also demostrates the effectiveness of the proposed module." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel approach called BLDA to address class bias in domain adaptation for semantic segmentation tasks. It first evaluates prediction bias across different classes by analyzing the network's logits distribution. Then, a a post-hoc method is designed to adjust logits distributions after training. With the logits changes, a real-time logits values adjustment module is proposed by using GMMs to estimate logits distribution parameters online. 
The author then introduces cumulative density estimation as shared structural knowledge to connect the source and target domains. An additional regression head in the network predicts the cumulative distribution value of samples, which represents class discriminative capability, further enhancing adaptation performance on semantic segmentation tasks. The results in the experiments shows its effectiveness as a module addition to selected existing DA for segmentation baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed method relies on the logits distribution. However, this distribution can be affected by data quality and model architecture, which can affect the accuracy of bias assessment.\n2. As a DA for segmantation task, a very severe issue is its efficiency concern. Adaptation process already cost a lot of time and computational resources, the proposed method seems exacerbated this issue by multiple GMMs. An efficiency study including wall-clock time or other efficiency measurement will be good to discuess the trade-offs between class-balanced performance and the actual cost.\n3. If the anchor distribution is far away from the true distribution of the target domain, logits alignment may be suboptimal, meaning if the domain gap is large, this part may be not work.\n4. As a module proposed rather than a whole algorithm, its effectiveness is expected to be confirmed on a considerable large amount of baselines methods, however, only few of them are studied and compared only for Transformer-based methods. I would recommand to evaluate on more baselines such as [1][2][3] and backbones (such as Deeplab v2 Deeplab V3+, for methods such as ProDA) to conform its effectiveness. especially those even have more severe class-imbalance issues.\n5. 
There exist a huge amount of methods or loss functions targeting class-imbalanced issue (for or not for semantic segmentation), some need in related works and some need a experiments for comparison, but only few of them listed and discussed. \n6. Since the classes have been categorised as over/under predicted, group them in the experiments and study would be better to understand the module effectiveness on classes with different characteristics.\n\nI will scoring up or down based on the author's reply.\n\n[1]. Domain adaptive semantic segmentation by optimal transport\n\n[2]. DiGA: Distil to Generalize and then Adapt for Domain Adaptive Semantic Segmentation\n\n[3]. Prototypical contrast adaptation for domain adaptive semantic segmentation" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. For weakness 1, could you conduct a theoretical complexity analysis comparing the proposed BLDA with the baseline? Additionally, please report and analyze the actual inference time, training time, and memory usage, along with a comparison to baseline methods (without adding BLDA).\n\n2. For weakness 2, could you integrate BLDA into recent UDA segmentation methods [A], [B], [C], and [D]?\n\n3. The mentioned works are highly relevant but lack citations in this paper. 
Could you update Section 2.1 (Related Work) to include all necessary references?\n\n[A] Focus on Your Target: A Dual Teacher-Student Framework for Domain-adaptive Semantic Segmentation\n[B] CDAC:Cross-domain Attention Consistency in Transformer for Domain Adaptive Semantic Segmentation\n[C] Diffusion-based Image Translation with Label Guidance for Domain Adaptive Semantic Segmentation\n[D] Learning Pseudo-Relations for Cross-domain Semantic Segmentation" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The motivation is clear, with a thorough statistical analysis of the class bias issue in unsupervised domain adaptation (UDA) for semantic segmentation (Figures 1 and 2).\n- The paper is generally well-written, well-structured, and easy to follow.\n- The proposed method comprises four modules. Although each module is simple and widely used in the machine learning field (e.g., GMM and alignment with anchor distributions), these techniques are effective in addressing issues found in this task.\n- The experiments are comprehensive, covering three transfer tasks for segmentation, an additional image classification task (included in the supplementary materials), and extensive qualitative analyses." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the challenge of class imbalance in unsupervised domain adaptation (UDA) for semantic segmentation, where labeled source data is used to improve the model’s performance on an unlabeled target dataset. The authors propose a Balanced Learning for Domain Adaptation (BLDA) technique that aligns class predictions by analyzing and adjusting predicted logit distributions, even without prior knowledge of distribution shifts. 
BLDA enhances UDA model performance by mitigating class bias, particularly for under-represented classes, leading to more accurate segmentation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed method is computationally heavy, as it includes an additional regression head with extra training objectives and requires GMM updates via EM algorithms. Consequently, this approach may incur significantly more computation time and memory usage than baseline methods.\n\n2. In Tables 1, 2, and 4, all existing methods equipped with BLDA are outdated. It remains questionable whether current SOTA methods (in 2023 and 2024) are sufficient to address prediction bias issues." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses for details. Due to the concerns of the novelty and potential impact, the reviewer is inclined to rate a borderline reject." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well-written and easy to follow. The figures clearly show the distribution trends to help understand the core idea. \n2. There are many formula languages to describe the proposed method precisely.\n3. 
The experiments on the GTAv/SYNTHIA/Cityscapes benchmark show clear improvements over baseline methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper discusses the unsupervised domain adaptation problem in semantic segmentation tasks. The method first identifies unbalanced classes by analyzing the predicted logits. Then, it aligns the distributions using a preset anchor distribution. Finally, it also adopts a Gaussian mixture model to estimate logits online to generate unbiased pseudo-labels for self-training. Experiments are conducted on the classic GTAv/SYNTHIA to Cityscapes benchmark for evaluation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The novelty is limited. The data distribution problem is not newly recognized, and the proposed method adopting anchor distributions for alignment and GMM for unbiased generation is also explored by previous methods. For example, the following papers [a-d] also adopt anchors and/or GMM methods to cross-domain alignment. Please consider providing more discussion with these related works.\n2. The method is only verified on a relatively small-scale benchmark. The compared works are from two years ago, which cannot prove this work's value to today's more advanced semantic segmentation approaches. Please consider providing more analysis with other datasets to prove the generalization ability of the method. 
Optional datasets such as Vistas, IDDA, BDD100k, and VIPER.\n\n[a] Multi-Anchor Active Domain Adaptation for Semantic Segmentation\n\n[b] Category Anchor-Guided Unsupervised Domain Adaptation for Semantic Segmentation\n\n[c] ProtoGMM: Multi-prototype Gaussian-Mixture-based Domain Adaptation Model for Semantic Segmentation\n\n[d] Uncertainty-aware Pseudo Label Refinery for Domain Adaptive Semantic Segmentation" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "## Some questions in Figure 3:\n\n1. Figure 3 presents the logit distributions for positive and negative samples; however, the lack of labeled x- and y-axes in the figure makes it challenging to interpret these distributions effectively. \n2. There is no clear explanation of the direction of reweighting and resampling applied to the logit distributions. This omission makes it difficult to understand the intended insights from Figure 3, as well as the overall method’s mechanism and impact on balancing. \n\n3. There are a few grammatical errors, such as the \"Discusiion\" in L307." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. 
The class-imbalanced is an important issue in DASS, and this paper provides a novel method to tackle this problem by aligning the logits distributions of all classes with anchor distributions to achieve balanced prediction.\n\n2. Extensive experiments have demonstrated the effectiveness of the proposed method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a BLDA method to address class-imbalanced problem in unsupervised domain adaptive semantic segmentation. BLDA analyzes the distribution of predicted logits to assess class prediction bias and proposes an online logits adjustment mechanism to balance class learning in both source and target domains. The method incorporates Gaussian Mixture Models (GMMs) to estimate logits distributions and aligns them with anchor distributions using cumulative density functions. Extensive experiments on standard UDA semantic segmentation benchmarks demonstrate significant performance improvements." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper claims a key contribution in proposing a post-hoc class balancing technique to adjust the network's predictions by establishing two anchor distributions, $P_p$ for positive predictions and $P_n$ for negative predictions. However, the paper lacks sufficient explanation regarding the selection criteria for these anchor distributions, which raises questions about the method's validity and soundness.\n\n2. The current approach in this paper aligns the positive and negative distributions to anchor distributions as part of the post-hoc class balancing strategy. However, based on my understanding, this alignment may not effectively address label noise—a crucial aspect of self-training where pseudo label denoising is often central to performance improvement. 
Instead, recent studies [1,2] have demonstrated the utility of negative pseudo labeling, showing that leveraging negative information more directly can enhance model robustness and reduce noise. Clarification on the rationale for this alignment-based approach, especially in comparison to existing negative pseudo-labeling methods, would help to justify the method’s efficacy and theoretical basis in the context of label noise mitigation.\n\n[1]. Domain Adaptive Semantic Segmentation without Source Data\n\n[2]. A Curriculum-style Self-training Approach for Source-Free Semantic Segmentation" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024balanced,\ntitle={Balanced Learning for Domain Adaptive Semantic Segmentation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0MhlzybvAp},\nnote={under review}\n}" }, "abstract": { "value": "Unsupervised domain adaptation (UDA) for semantic segmentation aims to transfer knowledge from a labeled source domain to an unlabeled target domain, improving model performance on the target dataset without additional annotations.\nDespite the effectiveness of self-training techniques in UDA, they struggle to learn each class in a balanced manner due to inherent class imbalance and distribution shift in both data and label space between domains.\nTo address this issue, we propose Balanced Learning for Domain Adaptation (BLDA), a novel approach to directly assess and alleviate class bias without requiring prior knowledge about the distribution shift between domains.\nFirst, we identify over-predicted and under-predicted classes by analyzing the distribution of predicted logits.\nSubsequently, we introduce a post-hoc approach to align the positive and negative logits distributions across different classes using anchor distributions and cumulative density functions.\nTo 
further consider the network's need to generate unbiased pseudo-labels during self-training, we couple Gaussian mixture models to estimate logits distributions online and incorporate logits correction terms into the loss function.\nMoreover, we leverage the resulting cumulative density as domain-shared structural knowledge to connect the source and target domains.\nExtensive experiments on two standard UDA semantic segmentation benchmarks demonstrate that BLDA consistently improves performance, especially for under-predicted classes, when integrated into existing methods.\nOur work highlights the importance of balanced learning in UDA and effectively mitigates class bias in domain adaptive semantic segmentation." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Semantic segmentation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/968cbb6184ebed9b5c2e0126e082192e9ae30e8f.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. 
If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Balanced Learning for Domain Adaptive Semantic Segmentation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0N8yq8QwkD
Mani-GS: Gaussian Splatting Manipulation with Triangular Mesh
main
Withdraw
Editable Rendering; 3DGS; Differential Rendering
applications to computer vision, audio, language, and other modalities
Xiangjun Gao;Xiaoyu Li;Yiyu Zhuang;Qi Zhang;Wenbo Hu;Chaopeng Zhang;Yao Yao;Ying Shan;Long Quan
~Xiangjun_Gao1;~Xiaoyu_Li2;~Yiyu_Zhuang1;~Qi_Zhang10;~Wenbo_Hu2;~Chaopeng_Zhang1;~Yao_Yao1;~Ying_Shan2;~Long_Quan2
5;5;5;5;5
5;4;5;5;4
2;3;3;3;3
2;2;1;2;2
2;3;3;3;1
5
4.6
2.8
1.8
2.4
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- On line 505, you mention that the results using SuGaR mesh are 33.676 dB (You + SuGaR), which is higher than 33.34 (You + NeuS). Why use NeuS if this is the case? 
If this is the case, the contribution on a quantitative level does not seem to be significant.\n- In Table 1, please add, if possible, the rendering results of NeuS so that it can be seen how much the authors improve on the work of NeuS." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The strengths of this work rely mainly on addressing a highly relevant problem and achieving great values compared to their chosen state-of-the-art methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors improve the methodology of manipulating renderings generated by 3D Gaussian Splatting (3DGS). To achieve this, they propose the use of triangular mesh (generated by NeuS) as initial input to the 3DGS. Additionally, the authors propose a triangle-aware 3DGS to improve the manipulation and rendering capability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "This work suffers from a few larger issues:\n\n- Poor writing quality. Here, we mostly mean that the paper heavily introduces and talks about NeRF in the introduction and related work, while this is not relevant for understanding the paper. Further, structurally the paper needs some improvements (for example Figure is mentioned on page 4 but not seen until page 6)\n- In general, while the method works decently, the contributions do not seem to be enough\n- Compared to SuGaR, the author here uses better meshes that are generated from NeuS (higher training time). The authors should address the differences in the training in their work." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please check the weakness section. My main concern is the comparison with GaMeS or Mesh-GS." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well written and straightforward.\n2. The paper proposes to bind Gaussians to a local triangle space, which maintains the local rigidity and preserves the relative\nlocation between Gaussians, allowing the method to preserve the high-fidelity rendering results after manipulation.\n2. The manipulation results are vivid and interesting, especially the soft body simulation.\n3. The authors demonstrate the editability of the method on three different tasks, which shows the capability of the method in various scenarios." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper utilizes a given triangular mesh for free-form shape deformation of Gaussian-splatting with self-adaption. By parameterizing each Gaussian in the local triangle space and decoupling the local and global transformations, the proposed method maintains the local rigidity and preserves the relative location between Gaussian, which is robust to inaccurate meshes. 
The authors demonstrate the method editability in three tasks, large deformation, local manipulation, and soft body simulation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The authors need to compare their method with GaMeS or Mesh-GS to demonstrate their contributions. In comparison to GaMeS which constrains the Gaussians on the surface exactly, the main contributions of Mani-GS are (1) attaching the Guassians to local space rather than global space, and (2) allowing Gaussians to offset out of the attached triangle. Could the authors provide some qualitative results that support those two contributions? In terms of the rendering quality given an inaccurate mesh and the rendering quality after manipulation. For example, given the Poisson mesh in Fig.8, where part of the pot is missing, Mani-GS can better fill the missing part than GaMeS since it allows the offset. And for example, show a case where the rendering quality of Mani-GS is better than GaMeS after manipulation due to the local triangle space.\n\n2. In line 333 authors propose to use an adaption vector to scale both the offset vector and the scale of the Gaussian. However, the adaption vector solely depends on the length of the three triangle edges. Imagine stretching a triangle along its plane, e1, e2, e3 will all increase as the edge lengths get larger. Since e2 increases, the offset of the Gaussian along the triangle's normal direction will get larger, the Gaussian will move farther from the plane. A concrete example could be the Poisson mesh in Fig. 8, since part of the pot is missing, to be able to reconstruct the pot there must be lots of Gaussians that have large offsets along the normal directions, in that case if you stretch the pot vertically, I'd expect the Gaussians to expand horizontally as well. Does this lead to artifacts empirically? I'm happy to hear any comments on this. \n\n3. 
Is the mesh used in Table 1 extracted from SuGaR? If not what's the mesh used there and could you provide the results using the SuGaR mesh for a fair comparison? If yes seems the average PSNR is different from what is mentioned later in line 505.\n\n4. Do you regularize the scale of the local position \\mu? I'm concerned that a Gaussian could significantly offset the attached triangle, potentially causing artifacts after manipulation.\n\n5. In line 44 NeRF-Editing is referred to as Yuan et al. 2022, but in the rest of the paper (for example lines 365 and 379) it becomes Liu et al. 2021. The former approach is more relevant for comparison, as it aligns more closely with the context of free-form shape deformation, which the latter approach does not directly address. Is it a typo?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The training of 3DGS in the paper is conducted entirely in static scenes, which fails to effectively learn the correspondence that Gaussian and mesh should maintain during motion. If a 3D Gaussian is trained separately and then matched to the mesh surface (transforming coordinates from world space to the local space of each triangle), can good manipulation still be achieved?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper is well written, and the analysis is comprehensive.\n- The idea of correlating the scale of 3D Gaussians with the shape of the triangles to better handle large-scale deformations is reasonable.\n- The experimental results appear to be valid." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work proposes a method for 3D Gaussian manipulation using a mesh as a proxy. By defining a local coordinate system for each triangle, the paper associates Gaussians with each triangle in a self-adaptive manner. The paper is clearly illustrated and thoroughly demonstrated." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- 3D Gaussian Splatting achieves high-quality rendering results primarily due to its split/clone mechanism, which adaptively adjusts the number of points in the scene. However, this paper limits the number of Gaussians in each triangle face, which may restrict its fitting capability. Nevertheless, the rendering metrics in Table 1 appear to be very high, with some even exceeding those of the original 3DGS; this raises questions.\n- The main innovation of this paper lies in the introduction of $e$ in Equation 7 to better handle large-scale deformations. However, this is not evident in the ablation study. In fact, both the 3DGS on Mesh and Mesh + Offset experiments seem not to address the rotation of Gaussians, which is unreasonable.\n- The current experimental examples are focused on hard surfaces. However, the greater advantage of 3DGS compared to meshes lies in rendering scenes without well-defined surfaces. How does this method perform on fuzzy geometry (e.g., the data from \"Adaptive Shells for Efficient Neural Radiance Field Rendering\")?"
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Can you please summarize additional recent baselines, including Gaussian Frosting, and compare against those?\n\n2. Please show large deformations or avoid claiming them." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is reasonably well written and easy to understand. It addresses the important challenge of editing 3D scenes represented by 3D Gaussians. The qualitative results look compelling and quantitatively outperform the sugar baseline." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This submission describes an approach for deforming a 3D scene represented by 3D Gaussians. Towards this goal, the proposed method extracts a triangle mesh, binds the 3D Gaussians representing the scene to the mesh (on- and off-surface), and then uses the mesh to drive rigid and non-rigid deformations of the 3D Gaussians for object deformation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "There are several weaknesses:\n\n1. The proposed methods is incremental compared with sugar. 
The proposed method is basically sugar, which binds the optimized 3D Gaussians to the mesh surface, with an additional offset. This seems like a simple extension. More advanced extensions of sugar already exist, including \n\nGuedon and Lepetit, Gaussian Frosting: Editable Complex Radiance Fields with Real-Time Rendering, ECCV 2024\n\nwhich models a much broader class of objects than both sugar and the proposed work.\n\n2. The related work discussion is too focused on NeRF, instead of giving a more comprehensive snapshot of approaches that enable animatable / deformable 3D Gaussians. A few examples:\n\nHuang and Yu, GSDeformer: Direct Cage-based Deformation for 3D Gaussian Splatting\nAbdal et al., Gaussian Shell Maps for Efficient 3D Human Generation, CVPR 2024\nYang et al., Deformable 3D Gaussians for High-Fidelity Monocular Dynamic Scene Reconstruction, CVPR 2024\n\nSo relevant papers published at CVPR 2024, ECCV 2024, and also SIGGRAPH Asia 2024\n\n3. Some claims on the capabilities of the proposed system seem exaggerated\n\nThe presented results look good, but they mainly show local and non-rigid deformations. I did not see examples that show the claimed \"large deformations\" (see e.g. abstract & introduction)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. 
There are some citation errors, such as the reference to Mesh-GS (Waczynska et al., 2024), which actually pertains to the GaMeS paper.\n2. Have you tried testing your method on real-world data with backgrounds, such as the LLFF dataset? How effective is it in such cases?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The article has a clear logic and provides an in-depth analysis of the problem. For example, when discussing how to bind Gaussians to the mesh (Sec.3.3), the authors compared two alternative methods (\"Gaussians on Mesh\" and \"Gaussians on Mesh with Offset\"), analyzing the principles, advantages, and disadvantages of each. Another example is the authors' discussion of the results from different mesh extraction methods (Sec.3.2). \n2. The supplementary materials are meticulously prepared, and the demo presentation is impressive, showcasing excellent results on editing and simulating." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This article focuses on the editing of Gaussians. Its central idea is to first perform mesh modeling of the scene, then bind 3DGS to the mesh for topology-consistent training. To better bind Gaussians to the mesh’s triangular surfaces, the paper proposes a coordinate system definition method based on triangles, allowing the topology to maintain a more stable structure. Once completed, this enables Gaussian editing and simulation similar to mesh manipulation. The authors conducted experiments on NeRF synthetic data and DTU data, achieving the expected editing effects." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The discussion of some works is insufficient. 
For example, GaMeS and Mesh-GS are mentioned in the related work section, but as the most closely related and recent Gaussian methods, they are not included in the experimental comparisons. Methodologically, I feel that the Gaussian binding approach in this article is very similar to that of GaMeS, yet the authors do not discuss this point. The baselines the authors compare are outdated and are insufficient to demonstrate the superiority of their method.\n2. The range of data types this article's method can be applied to is not diverse enough. From the authors' experiments, it currently only supports the editing of small objects and relies heavily on the topology mesh obtained from mesh reconstruction algorithms (e.g., NeuS). If the object becomes more complex or includes a complex background, this approach is likely to produce a degraded-quality mesh, making it impossible to proceed with subsequent binding operations." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "By utilizing a triangle shape-aware Gaussian binding and adapting method, we can achieve 3DGS manipulation and preserve high-fidelity rendering after manipulation." }, "_bibtex": { "value": "@misc{\ngao2024manigs,\ntitle={Mani-{GS}: Gaussian Splatting Manipulation with Triangular Mesh},\nauthor={Xiangjun Gao and Xiaoyu Li and Yiyu Zhuang and Qi Zhang and Wenbo Hu and Chaopeng Zhang and Yao Yao and Ying Shan and Long Quan},\nyear={2024},\nurl={https://openreview.net/forum?id=0N8yq8QwkD}\n}" }, "abstract": { "value": "Neural 3D representations such as Neural Radiance Fields (NeRFs), excel at producing photo-realistic rendering results but lack the flexibility for manipulation and editing which is crucial for content creation. Previous works have attempted to address this issue by deforming a NeRF in canonical space or manipulating the radiance field based on an explicit mesh. However, manipulating NeRF is not highly controllable and requires a long training and inference time. 
With the emergence of 3D Gaussian Splatting (3DGS), extremely high-fidelity novel view synthesis can be achieved using an explicit point-based 3D representation with much faster training and rendering speed. However, there is still a lack of effective means to manipulate 3DGS freely while maintaining rendering quality. In this work, we aim to tackle the challenge of achieving manipulable photo-realistic rendering. We propose to utilize a triangular mesh to manipulate 3DGS directly with self-adaptation. This approach reduces the need to design various algorithms for different types of Gaussian manipulation. By utilizing a triangle shape-aware Gaussian binding and adapting method, we can achieve 3DGS manipulation and preserve high-fidelity rendering after manipulation. Our approach is capable of handling large deformations, local manipulations, and even physics simulations while keeping high-quality rendering. Furthermore, we demonstrate that our method is also effective with inaccurate meshes extracted from 3DGS. Experiments conducted on NeRF synthetic datasets demonstrate the effectiveness of our method and its superiority over baseline approaches." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Xiangjun_Gao1", "~Xiaoyu_Li2", "~Yiyu_Zhuang1", "~Qi_Zhang10", "~Wenbo_Hu2", "~Chaopeng_Zhang1", "~Yao_Yao1", "~Ying_Shan2", "~Long_Quan2" ] }, "authors": { "value": [ "Xiangjun Gao", "Xiaoyu Li", "Yiyu Zhuang", "Qi Zhang", "Wenbo Hu", "Chaopeng Zhang", "Yao Yao", "Ying Shan", "Long Quan" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Editable Rendering; 3DGS; Differential Rendering" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "gao|manigs_gaussian_splatting_manipulation_with_triangular_mesh" }, "pdf": { "value": "/pdf/afeb0719e2b1a7c5368747ee5fa586a73838ada7.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/703588398cb9ca32bf2bfd2662d5d3a3405990ad.zip" }, "title": { "value": "Mani-GS: Gaussian Splatting Manipulation with Triangular Mesh" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0NAVeUm7sk
Variational Bayesian Pseudo-Coreset
main
Active
Bayesian Pseudo-Coreset;Variational Inference
probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.)
5;5;5;5
4;3;3;2
3;3;3;3
2;2;2;2
2;3;3;3
5
3
3
2
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- While VBPC demonstrates improvement over some BPC baselines, its classification accuracy on benchmark datasets like CIFAR-10, CIFAR-100, and Tiny-ImageNet remains notably lower than that of state-of-the-art classifiers. This raises concerns about the practical competitiveness of VBPC in real-world applications, particularly in image classification tasks where accuracy is crucial. Could additional optimizations or refinements to the VBPC approach improve performance?\n- The current experiments may not adequately showcase the strengths of VBPC in relevant scenarios. To enhance the paper’s impact and applicability, I suggest conducting additional experiments in settings where VBPC’s efficiency gains could be more convincingly demonstrated." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper effectively utilizes variational inference to derive a closed-form posterior distribution for the weights of the last layer, thereby addressing some of the performance limitations observed in prior BPC approaches.\nVBPC’s capability to approximate the predictive distribution in a single forward pass enhances both computational and memory efficiency, positioning it as a potentially valuable method for large-scale applications." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents the Variational Bayesian Pseudo-Coreset (VBPC) method, aimed at efficiently approximating the posterior distribution in Bayesian Neural Networks (BNNs). Given that BNNs face substantial computational challenges when dealing with large datasets due to their high-dimensional parameter spaces, VBPC provides a promising method. Traditional Bayesian Pseudo-Coreset (BPC) techniques have been proposed to alleviate these issues, yet they often struggle with memory inefficiencies. VBPC addresses this by leveraging variational inference (VI) to approximate the posterior distribution. This method achieves a memory-efficient approximation of the predictive distribution using only a single forward pass, which makes it appealing for computationally intensive applications." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The experimental validation on practical application is limited." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- The accuracy of these algorithms on cifar10, cifar100 and Tiny-Imagenet is too low. VBPC is effective relative to several existing BPC baselines, but the performance is significantly lower compared to existing state-of-the-art classifiers.\n- In the field of image classification, at least the scenes you choose for classification, these experiments do not seem to show the advantages of your method convincingly. \n- Please try to provide some new experiments, in more convincing scenarios, to illustrate the practical application value of your method." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This paper leverages the variational formulation to obtain the closed-form posterior distribution of the last layer weights, which resolves the issue of suboptimal performance seen in previous approaches.\n- And, the method approximates the predictive distribution with only a single forward pass instead of multiple forwards, making the approach computationally and memory-efficient." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes Variational Bayesian Pseudo-Coreset (VBPC), a novel approach to efficiently approximate posterior distribution in Bayesian Neural Networks (BNNs). Bayesian Neural Networks often face issues with largescale datasets due to their high-dimensional parameter space. To reduce the computational load, many Bayesian Pseudo-Coreset (BPC) methods have been proposed, but they suffer from memory inefficiencies. VBPC addresses these limitations by using variational inference (VI) to approximate the posterior distribution. Moreover, this paper provides a memory-efficient method to approximate the predictive distribution with only a single forward pass instead of multiple forwards, making the approach computationally and memory-efficient." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The experiment is not enough to illustrate the function of the algorithm." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Would Laplace approximation on the softmax likelihood be a better option than first choosing a variational inference scheme and then using a Gaussian likelihood? Laplace proves to be a powerful approach in Gaussian process classification.\n- Is the claim of Manousakas et al. 
2020 contrary to the main message of the paper? If correct, would this not undermine the significance of the proposed solution? If incorrect, why?\n- Do we really lack an analytical solution or at least an EM-like algorithm where the E-step has an analytical solution when only the last layer of a neural net is probabilistic?\n- What is the take-home message of Figure 1? I am missing to see a particular pattern there that helps motivate the proposed solution.\n\nMy initial score is a borderline as the paper has both certain merits and clear question marks. I am happy to consider significant score updates based on a convincing rebuttal discussion." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper is particularly well-written with a clearly defined problem scope and a solid solution methodology that follows a well-justified sequence of development steps,\n- The proposed bilevel variational inference formulation is neat and sensible.\n- The computational complexity analysis is indeed helpful to see the merit of the devised solution.\n- The reported results are strong on the chosen group of data sets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper studies the problem of core set extraction using Bayesian inference. The proposed solution builds on a two-stage variational inference scheme where the first stage is responsible for inferring the optimal core set while the second is to fit this core set to the full-scale data set at hand. The developed solution overarches the whole family of the distributions that belong to the exponential family." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper motivates the core set extraction problem with use cases such as processing big data and addressing continual learning setups. However, the presented results are on data sets that can be considered in the present technological landscape as toy problems. I do symphathize the idea of prototyping. But given the strong applied component of the present work, I am still unconvinced about the generalizability of the observed scores to a case where coreset extraction is an actual necessity. The issue may be addressed during the rebuttal by showing results on a large enough data set used as a standard coreset extraction benchmark or a continual learning application.\n- The need to use the Gaussian likelihood to avoid the need for an approximation stage is only partially convincing. It is an artiffact of choosing variational Bayes as the inference scheme, which is exogeneous to the problem anyway. Maybe this issue, linked to my first question below, will be clarified during the rebuttal." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Scalability to Larger Datasets: How does VBPC perform when applied to much larger datasets, such as ImageNet or larger text datasets? 
Does the method scale well in terms of both computational efficiency and accuracy, or does it encounter bottlenecks?\n\n2. Hyperparameter Sensitivity: How sensitive is VBPC to hyperparameters such as the number of pseudo-coresets, coreset size, and model initialization? Do suboptimal hyperparameter settings lead to significant performance degradation?\n\n3. Computational Costs and Training Time: Can you provide a detailed comparison of training times and wall-clock time between VBPC and other methods, particularly Bayesian Pseudo-Coreset methods using SGMCMC? How does the computational time scale with increasing dataset size?\n\n4. Limitations of the Last-Layer Approximation: Does the last-layer variational approximation hold up in deeper networks or more complex tasks such as regression? Have you observed any failure cases where this approximation does not capture enough uncertainty?\n\n5. Interpretability of Pseudo-Coresets: What do the learned pseudo-coresets represent? Are they capturing key features of the original dataset, and if so, how do they evolve during training? Is there a way to interpret or visualize the coreset to provide better intuition about what is being distilled?\n\n6. Generalization to Different Tasks: Can the VBPC method be applied effectively to tasks beyond classification, such as regression or other types of Bayesian inference? If so, how does the method adapt to these different problem types?\n\n7. Robustness to Out-of-Distribution (OOD) Data and Adversarial Attacks: Does VBPC provide any robustness to adversarial attacks or strong distribution shifts beyond what is demonstrated with CIFAR10-C? How does the method perform in more severe OOD scenarios?\n\n8. Memory-Efficient Loss Computation: How significant is the impact of memory-efficient loss computation during training in terms of accuracy or stability? Does it introduce any trade-offs in performance, particularly in very high-dimensional settings?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper introduces a novel approach to improving the efficiency of Bayesian Neural Networks (BNNs) by combining variational inference with pseudo-coresets. This innovation is notable because it addresses the longstanding challenges in the field related to computational and memory inefficiencies when scaling BNNs to large datasets. The proposal of a closed-form solution for last-layer variational inference is a significant departure from prior methods that relied on more memory-intensive sampling-based approaches. By focusing on variational inference and pseudo-coresets, the authors provide an original contribution that builds on existing methods but removes key limitations such as memory usage and the reliance on gradient stopping.\nThe technical rigor of the paper is high, with well-founded theoretical development and comprehensive empirical evaluations. The authors derive closed-form solutions for coreset variational inference, addressing a critical computational bottleneck in Bayesian model averaging. Their empirical results, demonstrated across multiple datasets (e.g., MNIST, CIFAR10, CIFAR100), show significant improvements over existing BPC methods in terms of both accuracy and negative log-likelihood, which strengthens the quality of their proposed method. The paper also includes a variety of comparisons with competitive baselines, reinforcing the robustness and effectiveness of their approach.\nThe paper is clearly structured and provides sufficient background for readers to understand both the motivation and the details of the proposed method. The explanation of the problem, the limitations of prior work, and the step-by-step presentation of the VBPC approach are clear and easy to follow. 
The use of mathematical derivations is well-supported by intuitive explanations, making the complex variational inference approach more accessible. The inclusion of visual results and performance tables also contributes to clarity, helping readers visualize the practical benefits of VBPC.\nThe significance of the work lies in its potential to influence how BNNs are applied to large-scale data problems. By significantly reducing the memory and computational burdens of Bayesian model averaging, the proposed VBPC method makes BNNs more feasible for real-world applications, such as those in healthcare and climate analysis, where uncertainty estimation is critical. The method could have broad implications for other fields requiring scalable, probabilistic neural networks. Additionally, the ability to perform Bayesian inference with less computational overhead enhances the practicality of deploying BNNs in production environments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper titled “Variational Bayesian Pseudo-Coreset” introduces a novel method aimed at reducing the computational and memory challenges associated with large datasets in deep learning, particularly within the context of Bayesian Neural Networks (BNNs). The authors address limitations in prior methods of Bayesian Pseudo-Coresets (BPCs), which often face inefficiencies in memory usage and suboptimal results during training. They propose a new approach called Variational Bayesian Pseudo-Coreset (VBPC), which leverages variational inference to approximate the posterior distribution of model weights. The key innovation of VBPC is the use of a closed-form solution to compute the posterior for the last layer of BNNs, eliminating the need for complex gradient-stopping techniques used in previous BPC methods. This significantly reduces memory usage and computational load. 
Additionally, VBPC allows for more efficient training and inference by using a single forward pass for predictive distribution computation. Empirical evaluations demonstrate that VBPC outperforms existing BPC methods on benchmark datasets, showing improvements in both accuracy and negative log-likelihood metrics across various datasets, such as MNIST, CIFAR10, and CIFAR100. The paper contributes to the field by enhancing the efficiency and scalability of BNNs, particularly in environments that require handling large-scale data while maintaining the benefits of Bayesian inference." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper focuses heavily on benchmark datasets like MNIST, CIFAR10, and CIFAR100, which are common in academic research but may not fully represent the complexity of real-world problems. While these datasets help establish baseline performance, the paper would benefit from exploring more challenging, domain-specific datasets, particularly those that are more representative of practical applications in fields such as healthcare or finance. Expanding the evaluation to datasets that feature more variability and noise could demonstrate the method’s robustness in real-world settings, which is especially important given the paper’s goal of making Bayesian Neural Networks more feasible for large-scale applications.\nΑlthough the paper demonstrates memory efficiency improvements, there is no extensive discussion of the scalability of the method when applied to very large datasets beyond those tested (e.g., ImageNet or even larger datasets in natural language processing). The paper could benefit from a more detailed analysis of the method’s behavior as the dataset size grows significantly. Additionally, the paper does not provide enough insight into the sensitivity of the method to hyperparameter choices such as the coreset size or the initialization of the model pool. 
It would be helpful to include an ablation study or sensitivity analysis that investigates how performance degrades with suboptimal hyperparameter choices and whether the method requires careful tuning to achieve competitive results.\nWhile the paper emphasizes memory savings, it does not provide a thorough comparison of training times between VBPC and existing methods, particularly in scenarios with high-dimensional datasets. A more detailed analysis of wall-clock time or computational complexity across different hardware configurations (e.g., GPUs versus CPUs) would be useful. This would help practitioners better understand the trade-offs between memory savings and potential increases in computational time, especially when scaling to larger architectures and datasets.\nThe paper relies on the last-layer variational approximation to simplify the posterior calculation, but the limitations of this approach are not thoroughly discussed. While the paper suggests that this approximation performs comparably to more complex methods, it would be valuable to include a deeper investigation of when this approximation might fail, especially in models with deep architectures or tasks requiring fine-grained uncertainty estimation. A discussion on whether the approximation is sufficient in all use cases or only certain tasks (e.g., classification versus regression) would make the paper more transparent.\nThe paper demonstrates that VBPC can learn pseudo-coresets that effectively approximate the full dataset’s posterior, but it doesn’t provide much insight into the interpretability of these pseudo-coresets. For example, what are the learned coresets capturing in terms of dataset distribution or feature representation? A qualitative analysis, such as visualizing the pseudo-coresets or interpreting what aspects of the data they retain, would help reinforce the method’s effectiveness. 
Additionally, further explanation of how these pseudo-coresets evolve during training and contribute to Bayesian uncertainty could strengthen the narrative.\nWhile the paper briefly touches on robustness to distributional shifts using CIFAR10-C, the evaluation of predictive uncertainty in real-world settings is somewhat lacking. It would be useful to see how VBPC handles more complex out-of-distribution (OOD) detection tasks or how well it captures uncertainty under adversarial conditions, which are critical aspects of Bayesian inference in high-stakes applications like healthcare. A more thorough evaluation in these contexts could elevate the practical relevance of the method." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "In this paper, we propose a new Bayesian Pseudo-Coresets method which enables the efficient variational inference." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024variational,\ntitle={Variational Bayesian Pseudo-Coreset},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0NAVeUm7sk},\nnote={under review}\n}" }, "abstract": { "value": "The success of deep learning requires large datasets and extensive training, which can create significant computational challenges. To address these challenges, pseudo-coresets, small learnable datasets that mimic the entire data, have been proposed. Bayesian Neural Networks, which offer predictive uncertainty and probabilistic interpretation for deep neural networks, also face issues with large-scale datasets due to their high-dimensional parameter space. Prior works on Bayesian Pseudo-Coresets (BPC) attempt to reduce the computational load for computing weight posterior distribution by a small number of pseudo-coresets but suffer from memory inefficiency during BPC training and sub-optimal results. 
To overcome these limitations, we propose Variational Bayesian Pseudo-Coreset (VBPC), a novel approach that utilizes variational inference to efficiently approximate the posterior distribution, reducing memory usage and computational costs while improving performance across benchmark datasets." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Bayesian Pseudo-Coreset", "Variational Inference" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b52be084d314210354c3e911c9c0cc38a2cf09af.pdf" }, "presentation": null, "primary_area": { "value": "probabilistic methods (Bayesian methods, variational inference, sampling, UQ, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/98748616696173bf473371b3b3e9dcb0bca57c14.zip" }, "title": { "value": "Variational Bayesian Pseudo-Coreset" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0NEjIZlEhP
Verified Relative Output Margins for Neural Network Twins
main
Active
Relative Output Margin;Formal Verification;Deep Neural Networks
alignment, fairness, safety, privacy, and societal considerations
3;3;3;5;5
4;4;3;4;2
2;3;3;3;2
2;2;1;3;2
3;2;3;3;2
3.8
3.4
2.6
2
2.6
-0.408248
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Included in weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The problem the authors are addressing is import and relevant:\nHow to formally compare two neural networks' decision confidence across many examples and not only at the given evaluation datapoints. This is especially relevant when we modify networks via e.g. quantization and pruning but need guarantees (in high stakes settings such as medicine).\n\nThe linear programming formulation makes the solution practical, which is good -- imho a method that can be practically applied is a key to wide adoption and impact.\n\nI also appreciate the comparison to adversarially trained models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "I am pretty new to this field so I will try to summarize what I understand to be the key contributions of this paper. If I am wrong, please do let me know in the comments:\n\n1. The authors introduce a new way to compare two neural networks (e.g. an original and a compressed version of the same net) by looking at their \"relative output margins\" = essentially comparing how confidently they make the same decisions.\n\n2. 
They provide a formal verification framework that can prove, within a small neighborhood of a given input (like a small perturbation of an image), that one network will always make decisions at least as confidently as another network when they're both correct.\n\nThey demonstrate this is practically useful when:\n\na. Comparing original networks with their pruned/quantized/distilled versions\nb. Analyzing medical AI systems where reliability is crucial\nc. Understanding the relationship between regular and adversarially-trained models\n\nThe key innovation is that instead of trying to verify properties across all possible inputs (which would be intractable), they focus on small, local neighborhoods around specific inputs and use linear programming techniques to efficiently compute provable bounds on the networks' relative behavior in these regions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I have a few concerns:\n\n1. Scalability\n\n1.1. linear programming can be expensive -- how well does this scale to larger networks?\n1.2. small networks and small regions are shown in the paper. How well would this do on e.g. a ~100M parameter ViT and with larger regions?\n\n2. How tight are the bounds? Do you have any experiments to demonstrate that? I would also be great to discuss worst-case scenarios with examples and develop some kind of a rudimentary case study of that.\n\n3. Small regions\nThe small perturbation sizes used (0.001, 0.01) may not reflect real-world distortions. If I add a bit of a Gaussian noise to the whole image, I can easily get much higher delta. \n\n4. Comparison to other linear programming based methods\nI think there are other methods that use local approximations to calculate the difference between networks (I might be wrong on this), yet you don't compare to them?\n\n5. Medical Application Claims\nIt would be good to compare your estimates to some sources of ground truth. 
Is that feasible?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) LROM is assymmetric, i.e inverting DNN N1 and N2 yields a different bound. Can you comment on this property? Is this something desirable? What if LROM(N1, N2) equals some value, and LROM(N2, N1) equals another, is there something to interpret here (qualitatively)?\n\n2) When LROM is very negative (i.e logits difference is huge) which implies that probabilities are near-zero, not too far away from machine precision zero, do you expect this value to carry meaningful information?\n\n3) Can you clarify use-cases in which LROM is useful for practioner?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "### Originality\n\nIt think the main originality of the paper lies in considering the joint optimization of the margins of two networks. 
Intuitively, one can understand why this is a superior approach compared to optimizing separately the bounds and trying to aggregate them (although the optimization problem is now double the dimension), as shown in Figure 2.\n\nFocusing on the context of pruned/distilled/quantized networks is also relevant, re-targeting the (sometimes too ambitious) conventional goal of certifying a given network into just showing that some networks is not much worse than an other.\n\n### Clarity and soundness\n\nThe paper is clear overall, proofs look correct." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper is interested in certifying the output of a given network w.r.t another one, targeting use-cases like pruning/distillation/quantization, in the goal of demonstrating that the pruned/distilled/quantized network exhibits not only similar accuracy over the train set, but even consistent decisions around the same local regions around each training example.\n\nTo do so, they introduce a novel measure called Relative Output Margin (ROM), with is the ratio between the Output Margin (OM) of two networks at a given input point. Finally, by taking the minimum of the ROM over a whole infinity-norm ball centered around a given point, they define the Local ROM (LROM).\n\nComputing the exact LROM is a hard problem, but its linear relaxation for feed-forward ReLU networks is tractable and yields a lower bound, which is sufficient for the purpose of certification. \n\nThe algorithm is tested in the context of pruning, quantization, and distilled networks, on two images datasets (MNIST, CIFAR-10), and two tabular/signal datasets (CHB-MIT and MIT-BIH)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "### Novelty\n\nMy main source of concern is the novelty. 
Computing certificates for ReLU-based networks is a well established methodology that relies on various linear relaxation (as used in this paper), or interval propagation. \n\nThis paper utilizes these tools, and the only novelty is to consider a joint optimization of the difference of logits for two networks, instead of a single one, which I consider a straightforward extension departing from existing methods.\n\nTherefore the main contribution of the paper is introducing the OM, ROM and LROM measures, and using the whole Section 2 to deal with rather trivial considerations.\n\nAll the proofs of appendix A.1 are a trivial consequence of manipulating the log of probability ratios, to end-up with a simple difference of logits. This is colloquially called the *margin*, not to be confused with the Output Margin (OM) measure that authors introduce, without clear motivation.\n\nIn most papers for NN certification, this margin is analyzed, reported or even optimized (using Hinge loss) in a straightforward manner, without bothering highlighting the link with output probabilities. In this regard, the theoretical contribution appears rather shallow IMHO. \n\nDropping this narrative would even allow to use the method of the paper outside the context of classification. Even the concept of “twin networks” looks overkill to simply describe networks operating over the same input/output spaces." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- It would be better to conduct a comparison with existing adversarial robustness metrics and methods. How do the proposed measures differ from the existing adversarial robustness measures? \n\n- What would be the applications of the framework beyond network twins (similar architectures with compact versions), such as varied neural architectures?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper’s primary contribution lies in defining and formalizing ROM and LROM. This enables a provable, quantitative comparison between neural networks for applications requiring high reliability and safety.\n\n- The study evaluates the proposed framework across multiple datasets, including standard and specialized medical data, supporting the generalizability and robustness of LROM as a comparative measure." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a framework for comparing two neural network classifiers with shared input and output domains. The goal is to analyze these \"neural network twins\" through the concept of Relative Output Margin (ROM), a metric indicating the confidence with which one network outperforms the other within a defined input region. Specifically, the framework formalizes and verifies \"Local Relative Output Margins\" (LROMs), allowing for the computation of provable bounds that indicate which network consistently makes correct decisions across input variations. 
This is crucial in applications where compact, optimized versions of networks are used, such as in medical device deployment, where safety-critical tasks like seizure and arrhythmia detection require guaranteed performance reliability. \n\nThe experiments in the paper evaluate the proposed Relative Output Margin (ROM) and Local Relative Output Margin (LROM) framework by testing it on four datasets: MNIST, CIFAR-10, CHB-MIT (EEG data for epilepsy detection), and MIT-BIH (ECG data for arrhythmia detection). The experiments focus on verifying LROM across different pairs of neural networks: original, pruned, quantized, and distilled versions. Across datasets, the experiments demonstrate that the LROM framework effectively captures the comparative performance and robustness of different network types under a defined perturbation range." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The LROM optimization framework requires handling complex linear programming tasks, which may limit scalability for larger networks. It would be better to test the framework further on larger neural networks, such language models. \n\n- It may be challenging to interpret the evaluated measures due to the technical intricacies involved in LROM computation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See Weaknesses." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- A novel notion of Relative Output Margin is proposed.\n\n- The organization (not writing) is very well.\n\n- Experiments on multiple experiments are presented to show the interesting property of ROM." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This manuscirpt define the notion of Relative Output Margin (ROM) and Local ROM (LROM) to compare network twins. Specifially, LROM > 0 means one network can consistently outperform another one in the vicinity of a given points. a theorem is provided the bound the LROM. Experiments on for datasets with 7-layer MLP are conducted to show the effectiveness of proposed LROM." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "## Major\n\n- The conclution is unclear.\n - In my opinion, the main theorem (Theorem 3.1) solely establish that there indeed exists upper and lower bounds for any LROM. If it is, the significance of this theorem is limited and LROM cannot be linked to the generalizability of DNNs.\n - There is no clear conclusion for experiments. What do the experiments show? \n\n- In the experiments, only a small 7-layer MLP is used, which hardly give a good predition for CIFAR-10, which makes a limited contribution. Can the authors provides the results at least on CNNs like VGG or ResNet?\n\n- In the experiments, \"We exclusively focus on **correctly** classified samples\" (Line 299). For me, a big application for ROM is to measure the uncertainty of predictions. 
If we have already know that the predictions is correct, there is no point to use this technique.\n\n## Minor\n\n- The writing can be improved.\n - In Introduction, more words are needed to breifly introduce the theoretical and empirical work ( now only 6 lines from Line 61-66), which will make this paper more clear.\n - The notation are too complicated and can be simplified.\n\n- It is better to number equations.\n\n- use $\\max$ and $\\min$ to replace $max$ and $min$ in the equations.\n\n- Line 215: \"$-\\mathcal{R}\\geq \\mathcal{M}$\" -> \"$-\\mathcal{R} \\geq -\\mathcal{M}$\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The authors point out that having a strictly positive/negative LROM between two networks is a sufficient condition to ensure that one makes better predictions than the other. I understand that ROM as formulated has the advantage of being invariant to the choice of threshold, but I find it paints an incomplete picture, and can be misleading. In particular, two networks can have 100% compatible predictions, yet the ROM value could vary significantly. Similarly, the same ROM can represent drastically different situations. In fact, if the two networks have log OM of +0 and +10, we are comparing a coin toss to an extremely accurate predictor. 
This is not the same as having two extremely confident networks with log OM of +1000 and +1010, but both situations have log ROM of +10.\n\nThe discussion on the effectiveness of adversarial training is interesting, however I can't seem to find any plot/table of the results described in the main text.\n\nI also would be interested in a more in depth discussion of the effects of the $\\delta$ parameter on the metric. Clearly for small enough values, the approach reduces to a pointwise comparison of model predictions. To justify the approach, it should be shown (at least empirically) that evaluating LROM gives significantly different results.\n\nThe choice of the correct $\\delta$ seems critical. If it is too large, the metric becomes meaningless and the number of verifiable points is likely to drop. Can you provide (at least) some heuristic to quantify a good value for $\\delta$?\nAlso, have you considered making it a function of $x$, trying to estimate the largest local region where the correct label remains constant?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper is well structured, clear and straight to the point. The contribution is well laid out, and the examples for the empirical evaluation are rich, relevant and diverse." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a methodology for verifying agreement in neural network that try to approximate the same target function.\nThe proposed method focuses on the predicted likelihood-ratio between two classes (denoted as OM). In particular, it gives local bounds on the difference in log OM between the two networks (log LROM). The bounds are given for local neighborhoods, assuming that the correct label is known and constant in the region. 
The bounds on log LROM are obtained relaxing the exact optimization objective to an approximate one, solvable with linear programming. \nThe paper evaluates the proposed approach on a variety of scenarios, comparing distillation and quantization techniques, as well as evaluating the robustness of adversarial trained networks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The proposed metric of ROM represents the difference in prediction confidence, and does not necessarily relate to the agreement of the predictions. The LROM metric is only informative when the bounds do not contain zero. Moreover, even assuming a point has verifiable LROM, it might not be verifiable in the desired direction.\n\nMoreover, the correctness of the LROM metric depends on the assumption that the correct label remains constant in the considered region. Therefore it is only sound for small enough neighborhoods. Looking at the empirical results, in many scenarios, the percentage of verifiable points rapidly drops to zero as the neighborhood size increases. \nIt is unclear if the proposed approach provides a qualitatively different result than a direct pointwise comparison of model predictions." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This framework compares two neural networks by quantifying relative output margins and providing provably-correct bounds on decision quality across an input region." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024verified,\ntitle={Verified Relative Output Margins for Neural Network Twins},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0NEjIZlEhP},\nnote={under review}\n}" }, "abstract": { "value": "Given two neural network classifiers with the same input and output domains, our goal is to compare the two networks in relation to each other over an entire input region (e.g., within a vicinity of an input sample). Towards this, we introduce and quantify the Relative Output Margin (ROM) with which decisions are made. A larger output margin for a network w.r.t. another indicates that this network consistently makes a correct decision every time the other network does, and it does so in the entire input region. More importantly, as opposed to best-effort testing schemes, our framework is able to establish provably-correct (formally verified) bounds on ROM gains/losses over an entire input region. The proposed framework is relevant in the context of several application domains, e.g., for comparing a trained network and its corresponding compact (e.g., pruned, quantized, distilled) network. We evaluate our framework using the MNIST, CIFAR10, and two real-world medical datasets, to show its relevance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Relative Output Margin", "Formal Verification", "Deep Neural Networks" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/bc645c187a5e61f687d2be520692f1ea98785891.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/eb7495dc265c4910c26c01a7eb0ce311e0195061.zip" }, "title": { "value": "Verified Relative Output Margins for Neural Network Twins" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0NvSMb7xgC
Auditing Predictive Models for Intersectional Biases
main
Active
predictive bias detection;fairness auditing;intersectional bias;contextual bias;group fairness definitions;subgroup bias;predictive bias
alignment, fairness, safety, privacy, and societal considerations
3;5;5;6
4;2;3;3
2;2;2;3
2;3;2;3
1;2;2;3
4.75
3
2.25
2.5
2
-0.648886
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How difficult does it appear to be to extend this framework to a multi-label setting?\n2. How computationally expensive does this method get when scaling the number of groups to be evaluated over? As it stands it appears that the maximum number of intersectional groups is 4 in these experiments?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The method proposed is relatively simple to implement and rigorously grounded in the hypothesis testing literature\n- The method is flexible for the commonly discussed fairness metrics\n- The synthetic experiments are designed well to demonstrate the efficacy of the method in different scenarios and metrics" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This study develops a new statistical test for identifying bias in prediction models across four different axes based on both the probabilistic outputs and the binarized classifications. This test builds upon likelihood ratio tests developed in the spatial and subset scan statistics literature. At it's core this test is examining when the quantity of interest deviates significantly from its expectation across multiple intersectional subgroups. 
The test is then evaluated on a semi synthetic dataset based on COMPAS and then COMPAS itself. Demonstrating its ability to identify bias and the most significantly impacted groups." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The methods that are compared against seem to be quite old and I would be interested to see how they compare to newer methods in the literature (e.g. [1])\n- More real world dataset studies would improve the study (e.g. folktables [2])\n- The writing is verbose at times and could benefit from being more concise. This is especially true in Section 3 when describing the methods.\n\n[1] Cherian, John J., and Emmanuel J. Candès. \"Statistical inference for fairness auditing.\" Journal of Machine Learning Research 25.149 (2024): 1-49.\n[2] https://github.com/socialfoundations/folktables" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Could the authors please address my concerns in the above section?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- **[Problem Importance]** The authors study an important problem.\n\n- **[Practicality]** The proposed method can accommodate a large number of fairness definitions that prior works are not able to accommodate." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors investigate the problem of intersectional bias in classification and develop a novel search method for identifying intersectional bias. The authors compare their method to other auditing methods on semi-synthetic data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **[Clarity]** Several important aspects of the paper are not articulated clearly. In particular, I found it difficult to follow the authors’ experimental design, a few examples include:\n - Section 4: What is a “row”? The authors refer to specific rows or “row $i$” without defining this term. Is the row a particular data point, or is it a row of the covariates?\n - When defining the true log-odds in their semi-sythetic data the authors say, “We use these weights to produce the true log-odds of a positive outcome $(Y_i = 1)$ for each row $i$ by a linear combination of the attribute values with these weights.”\nThis statement is quite vague and does not rigorously outline how the log-odds are computed. The authors denote the true log-odds as $L_i^{\\text{true}}$; perhaps a definition could be given for this quantity.\n\n\n- **[Narrative vs Experiments]** There is a strong disconnect between the authors' results and the discussion/motivation of the paper. For example, the authors spend substantial time going over different fairness metrics and discussing the applicability of their method to each metric. 
However, no experimental results are shown for any such metrics. The authors simply shift the predicted probabilities, or true probabilities, of some individuals by some value and measure whether their algorithm, or the baseline, can identify those individuals. I would have liked to see some results showing the accuracy of the authors' method as a function of subgroup unfairness under a particular metric. \n\n- **[Synthetic Data]** Due to the way in which the synthetic data is constructed, I find it difficult to appreciate some of the authors’ results. In particular, the authors randomly select sensitive attributes among all attributes in the data and change the true labels to have a noisy linear relationship with the features. Both choices destroy the innate relationships between features, sensitive attributes, and true labels, which cause unfairness in the base datasets (e.g., COMPAS). Further, it is not clear to me why we need synthetic data in the first place. The authors are working with two datasets that are known to possess innate bias both at the group level and the subgroup level; this begs the question as to why we are not shown results comparing the authors' method to SotA methods on these datasets without any synthetic modifications.\n\n\n- **[Simplistic Experiments]** In addition to the issues with synthetic data above, the authors only show experiments for two datasets and two classifier types.\n\n- **[Evaluation Metric]** When comparing to the baseline, the authors measure the IOU of the predicted subgroups $S^*$ and the subgroups with injected bias $S_{\\text{bias}}$, given on line 371. Without knowing whether or not the subgroups in $S_{\\text{bias}}$ are disadvantaged (and to what degree), it is difficult to appreciate the use of this metric. \n\n- **[Comparisons to Baselines]** When comparing their method to baselines, e.g., in Figure 1, the authors find that their method is only superior to baselines for relatively large amounts of bias. 
Moreover, in results such as Figure 2, it seems that the authors’ methods can achieve extremely poor accuracy depending on the type of bias present (negative or positive delta). Without apriori knowing the type of bias, it may be difficult to meaningfully apply these methods in practice. Lastly, the results in each of the aforementioned, and similar plots are difficult to interpret because we cannot understand how much bias a specific value of $\\delta$ or $\\mu$ corresponds to. It would be helpful to see the level of bias converted into actual fairness metrics." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "I have a few questions for the authors.\n\n* q1) Could the authors clarify my doubts regarding W1)? In particular, what are the authors' considerations regarding the emergence of the Yule effect?\n* q2) From Figure 3, it seems to me that the final results are heavily affected by which is the protected class specified. E.g., if we specify the \"Black defendants\" as the starting protected class, the subgroup with the highest F score (on Separation Scan for Recommendations) is \"Black Male Defendants\", while if we set \"Male Defendants\" as the starting class, the subgroup with the highest F score is \"Male Asian and Hispanic Defendants\". 
Even if I see why this occurs (in the first case, we refer to non-Black as the reference group, and in the second case, we refer to Females as the reference group), I would argue that this affects the ability of CBS to assess the intersectional biases occurring on a dataset. Can you comment on my observation further?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The main strengths of the paper are:\n\nS1) the motivation of the methodology is relevant, as identifying intersectional biases (in a tractable manner) is an open issue in the fairness literature;\n\nS2) the empirical evaluation supports the effectiveness of the method; \n\nS3) the algorithmic procedure for detecting the most significant subgroup seems novel." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a novel approach to auditing and detecting fairness biases in predictive models. The method, called Conditional Bias Scan (CBS), allows for identifying the subgroup with the most significant bias and comparing it with the equivalent subgroup in the non-protected class. Empirical evaluation suggests the effectiveness of the approach." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main shortcomings of the current version of the paper are:\n\n\nW1) I think a few arguments should be taken into account and need further clarification:\n* in [Ruggieri et al., 2023], the authors show that algorithmic fairness objectives are not compositional, i.e., even if the classifier is fair on some of the regions of the input space, due to the emergence of Yule’s effect, the overall system is not necessarily fair. 
This could hinder CBS's ability to evaluate the overall fairness of the system.\n* in lines 183-184, the authors consider propensity score estimates for $Pr(A=1|X)$. This assumes (implicitly) that the protected group can be seen as a treatment variable, while this has been largely debated in the literature (see e.g., for gender [Hu and Kohler-Hausman, 2020]). A proper discussion of this aspect should be provided.\n\nW2) The overall presentation can be improved. For instance, I find the empirical evaluation in section 4 quite dense and difficult to follow. E.g., starting the whole section from lines 310-319 can help the reader better understand the purpose of the experimental evaluation and help describe the evaluation setup (e.g., datasets, baselines, hyperparameters and metrics). \n\nW3) The empirical evaluation can be improved. Currently, the evaluation is limited to the COMPAS and German Credit datasets, which are rather small scale. I would argue that testing CBS on larger-scale datasets such as $\\texttt{folktables}$ [Ding et al., 2023] and $\\texttt{WCLD}$ [Ash et al, 2023] would make the results more compelling. Moreover, I do think CBS can be exploited to audit different classifiers and their relative biases, even though such an experiment is not performed.\n\n\n[Hu and Kohler-Hausman, 2019] - Hu, Lily, and Issa Kohler-Hausmann. \"What's sex got to do with machine learning?.\" In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency, pp. 513-513. 2020.\n\n[Ruggieri et al., 2023] - Ruggieri, Salvatore, Jose M. Alvarez, Andrea Pugnana, Laura State and Franco Turini. \"Can we trust fair-AI?.\" In Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 13, pp. 15421-15430. 2023.\n\n[Ding et al., 2021] - Ding, Frances, Moritz Hardt, John Miller, and Ludwig Schmidt. 
\"Retiring adult: New datasets for fair machine learning.\" Advances in neural information processing systems 34 (2021): 6478-6490.\n\n[Ash et al., 2023] - Ash, Elliott, Naman Goel, Nianyun Li, Claudia Marangon, and Peiyao Sun. \"WCLD: curated large dataset of criminal cases from Wisconsin circuit courts.\" Advances in Neural Information Processing Systems 36 (2023): 12626-12643." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How does CBS handle scenarios with continuous covariates without discretization, and how does it address the potential loss of valuable information during this process?\n2. What are the implications of using different models for estimating conditional expectations on the detection of biases? Beyond modeling the COMPAS data, is there additional evidence of real-world datasets to support the effectiveness of this method? See also weakness 2. \n3. Could you clarify the distinction between auditing the COMPAS dataset and the model itself? The explanation in Section 5 is not entirely clear.\n4. Regarding Figure 3: Does the framework detect the entire group of defendants under the age of 25? What is the role of the conditional variable C in the null hypothesis in this case?\n5. Consider making the tables more consistent in terms of formatting and presentation for easier comparison." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper provides a framework for auditing intersectional biases, a crucial area often overlooked in fairness assessments (detection of gerrymandering). \n2. The proposed method can accommodate different group fairness metrics and can effectively scan numerous subgroups." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors address the challenge of auditing machine learning models for intersectional biases (fairness gerrymandering). They introduce a methodology called Conditional Bias Scan (CBS) for detecting biases that affect specific subgroups, which may arise from intersectional factors (membership in two or more protected classes) or contextual factors (decision situations). The CBS methodology involves four stages: (1) initializing the event variable I, protected class A, covariates X, and conditional variable C based on the input parameters and chosen fairness definition; (2) estimating the expected value of I under the null hypothesis; (3) using a multidimensional subset scan to identify subgroups that systematically deviate from the expected values computed in step (2) and selecting the most significant ones; and (4) assessing the statistical significance of the detected subgroups. The paper includes a comprehensive experimental evaluation to validate the approach." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The reliability of the estimation of expectations I under the null hypothesis depends on having well-specified models for estimating the propensity scores of the protected class. \n2. The paper is quite dense and challenging to follow. 
It would benefit from providing more intuitive explanations or examples to illustrate why the overall method is effective in real-world scenarios. This would help readers better understand the practical implications and the rationale behind the approach." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Conditional Bias Scan is an auditing framework for detecting intersectional and subgroup biases in classifiers' predictions." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024auditing,\ntitle={Auditing Predictive Models for Intersectional Biases},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0NvSMb7xgC},\nnote={under review}\n}" }, "abstract": { "value": "Predictive models that satisfy group fairness criteria in aggregate for members of a protected class, but do not guarantee subgroup fairness, could produce biased predictions for individuals at the intersection of two or more protected classes. To address this risk, we propose Conditional Bias Scan (CBS), an auditing framework for detecting intersectional biases in classification models. CBS identifies the subgroup with the most significant bias against the protected class, compared to the equivalent subgroup in the non-protected class, and can incorporate multiple commonly used fairness definitions for both probabilistic and binarized predictions. We show that this methodology can detect subgroup biases in the COMPAS pre-trial risk assessment tool and in German Credit Data, and has higher bias detection power compared to similar methods that audit for subgroup fairness." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "predictive bias detection", "fairness auditing", "intersectional bias", "contextual bias", "group fairness definitions", "subgroup bias", "predictive bias" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/ab43f55ec91ae1c8abcf041b627f96d4c13a52c5.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/3bcab13767014bdc4ed87edf2ecdef3d183f4fdb.zip" }, "title": { "value": "Auditing Predictive Models for Intersectional Biases" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0OB3RVmTXE
Unstable Unlearning: The Hidden Risk of Concept Resurgence in Diffusion Models
main
Active
machine unlearning;concept unlearning;evaluation;diffusion models;text to image
alignment, fairness, safety, privacy, and societal considerations
3;3;5;5
4;4;4;3
1;2;2;2
3;2;2;3
3;2;3;2
4
3.75
1.75
2.5
2.5
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- What exactly is meant by “mapping concept”? I read the paper carefully but still find the term’s exact definition unclear. Did the authors use this term in the same way as in the MACE paper?\n- Regarding Figure 5, what would happen if 10 or 100 objects were removed, as in the celebrity erasure task?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper tackles a timely and practically-relevant problem supported by a fair amount of experiments. Model unlearning regarding AI safety is an area with limited prior research, making this work particularly valuable.\n- This work stands as a pioneering study in attempting to identify concept resurgence phenomenon regarding text-to-image diffusion models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper examines a significant vulnerability in text-to-image diffusion models regarding the unlearning of unwanted concepts, termed \"concept resurgence.\" It demonstrates that fine-tuning diffusion models on seemingly unrelated and benign data can inadvertently lead to the re-emergence of previously erased concepts. 
This vulnerability raises serious concerns about the reliability of current unlearning methods, particularly for developers aiming to protect users from undesirable content. The authors conducted experiments using Stable Diffusion v1.4 and the Mass Concept Erasure (MACE) technique, revealing that concept resurgence can occur even under benign conditions. Further, the authors explore and try to identify various factors which may contribute to this issue such as the choice of fine-tuning data and the regularization applied during unlearning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The main weakness of this paper is its limited experimental scope. While the paper's key contribution is the concept resurgence phenomenon, it is supported only by limited empirical evidence. This calls for testing the phenomenon in various setups, yet the authors only use a single model, SD v1.4. Given the availability of advanced models such as SDXL, EDM, MDT, and FLUX, it would be helpful to see experiments using other diffusion models, particularly those trained with flow matching objectives instead of score matching losses. Additionally, the authors exclude tasks related to artistic style removal and explicit content removal, citing evaluation challenges. However, it would still be valuable to demonstrate the concept resurgence phenomenon in these tasks, even if a fair evaluation is difficult. The current experimental setup is also limited in terms of dataset diversity. Providing additional qualitative examples beyond Figures 2 and 4 would strengthen the paper.\n- To my understanding, this paper only experimented with a single unlearning technique, MACE. The authors need to explore more existing methods such as UCE, FMN, ESD, SDD etc. Even if MACE is a SOTA unlearning method, concept resurgence may not appear with the other baselines. 
Section 4.2, in particular, would benefit from a broader discussion of baseline methods. \n- The authors propose three potential contributors to concept resurgence: mapping concept, regularization, and fine-tuning algorithms. However, the discussion in Section 4 lacks depth. The authors should offer theoretical justifications or at least propose a main hypothesis supported by empirical evidence. For example, in Figure 7, they suggest that “increasing regularization increases concept resurgence in the celebrity erasure task, but has little impact on the object erasure task.” It would be helpful to identify the key factor causing this difference and explore how this factor might be used to prevent concept resurgence. Further, the authors conclude that the difference between full fine-tuning and LoRA fine-tuning does not affect concept resurgence. However, if sufficiently “distant” fine-tuning can prevent concept resurgence, wouldn’t full fine-tuning be more effective than LoRA in doing so?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please kindly refer to the Weaknesses." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "* The authors identify a previously unknown vulnerability (concept resurgence) in diffusion models, which is important for understanding the limitations of current model update strategies.\n* This paper systematically examines both algorithmic and data-dependent factors contributing to concept resurgence, providing a detailed understanding of the phenomenon.\n* The research has direct implications for the development and safety of diffusion models, as it highlights the need to address concept resurgence to ensure reliable and safe model performance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper focuses on the concept of “concept resurgence” in text-to-image diffusion models. These models are often updated incrementally through fine-tuning and unlearning steps. The authors demonstrate that fine-tuning a diffusion model can cause previously “unlearned” concepts to reappear, even when fine-tuning on seemingly unrelated data. They conduct experiments using Stable Diffusion v1.4 and the Mass Concept Erasure (MACE) technique. The study investigates factors contributing to concept resurgence, including algorithmic choices (such as mapping concepts, regularization, and fine-tuning algorithms) and data-dependent factors (like CLIP distance and out-of-domain concepts). The findings highlight the fragility of current model update paradigms and raise concerns about ensuring the safety and alignment of diffusion models." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* While this paper focuses mainly on MACE as the unlearning algorithm, it remains unclear whether the observed results could be fully generalizable to other unlearning techniques, which can be considered to add for more comprehensive analysis.\n* Since we cannot enumerate all possible concepts during evaluation, could the authors provide some insights on the metrics that we can use to measure the difficulty of the resurgence of a certain concept? This might help to reach a more general conclusion of the experiments.\n* Aside from the two examined celebrity and object erasure tasks and specific benchmarks, it would be better to extend the evaluation on more diverse settings to see if the findings still hold.\n* Minor: Though it might be out of the scope of this manuscript, it is very interesting to have some theoretical analysis regarding the observations." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Can you apply additional unlearning methods (hopefully five different methods) to show the same concept resurgence phenomenon?\n* Can you visualize a target concept and out-of-domain concepts to show some (semantic) distance between them?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper introduces a very interesting phenomenon of unlearned models – concept resurgence. To my understanding, this observation hasn’t been discussed in the unlearning domain." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper reports an interesting behavior of unlearned diffusion models, called concept resurgence – when a concept is unlearned from a diffusion model, this concept is observable again after fine-tuning. The cause of this phenomenon is analyzed in two ways: algorithmic factors and data-dependent factors. In short, concept resurgence occurs when unlearned model parameters are close to the parameters of a pre-trained model and when fine-tuning data is correlated to training sets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The supporting experiments are slightly below the ICLR standard. Overall, this paper should justify their claim via experiments but the supporting experiments are weak/handful. \n\n* The interesting phenomenon is only evaluated on one unlearning model (i.e., MACE). Additional unlearning methods are needs to be evaluated on hopefully five different methods, e.g., Selective Amnesia (https://arxiv.org/abs/2305.10120), SALIENCY UNLEARNING (https://arxiv.org/abs/2310.12508), and more.\n* For out-of-domain concepts, it would be useful to add some visualization on the correlation between a target concept and out-of-domain concepts." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Can the authors provide details on what the 'others' and 'synonyms' are in the different figures? \n- Could the authors provide more experiment details, for e.g., what was the fine-tuning procedure to induce concept resurgence? Information like hyperpameters to reproduce the experiments are missing" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This idea of concept resurgence is very interesting and pertinent to the safety/concept unlearning community in text-to-image models. To my knowledge this is the first work to identify such an issue. \n- The paper is well-written and ideas are clearly communicated." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper investigates the phenomenon of concept resurgence in text-to-image diffusion models that have been fine-tuned to forget certain concepts. The authors show that after erasing certain concepts with MACE, fine-tuning on unrelated concepts can reintroduce the erased concepts. The authors carry out experiments where several parameters of the erasing/fine-tuning are varied to elucidate the various factors that contribute to concept resurgence." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The experiments in the paper are only on models erased with MACE, although numerous SD erasure works [1,2] have been proposed. Without experiments on a few more baselines, logically speaking the evidence from the paper only supports the claim that concept resurgence occurs on models erased with MACE rather than in general, which would weaken its impact. \n\n- Sec 4.3 seems to contradict the hypothesis that concept resurgence is more prominent if the weights from erasure were not moved far from the original weights, since I would assume LoRA makes smaller weight changes than full fine-tuning, yet the effects on resurgence are similar. Could the authors make this more quantitative and measure the deviation of the weights from the original values, for e.g., in the L2 sense?\n\n- Have the authors tested resurgence on truly more 'abstract' concepts like nudity or violence? The current experiments focus on relatively 'easier' concepts that can be defined by a single or few synonyms, like the name of the celebrity or object. Concepts like nudity can be expressed by numerous synonyms and even abstractly by the names of artists who paint with nude styles, for example.\n\n- Overall I found that the technical contribution of the paper to be somewhat lacking by ICLR's standards, even though the phenomena presented is novel. The experiments are focused on one baseline and two concept types (celebrities and objects). As the authors acknowledge in the limitations, the paper lacks theoretical insights into concept resurgence or any mitigation strategies. \n\nMinor points:\n- consider moving Eq 1 to the front of the paper and introduce MACE more thoroughly given that the experiments in the paper are focused on MACE.\n- some missing references on early works in the area of erasure/safety in text-to-image models [1,2,3,4].\n\n[1] Zhang, Eric, et al. 
\"Forget-Me-Not: Learning to Forget in Text-to-Image Diffusion Models. ArXiv abs/2303.17591 (2023).\" (2023).\n\n[2] Gandikota, Rohit, et al. \"Erasing concepts from diffusion models.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023.\n\n[3] Heng, Alvin, and Harold Soh. \"Selective amnesia: A continual learning approach to forgetting in deep generative models.\" Advances in Neural Information Processing Systems 36 (2024).\n\n[4] Schramowski, Patrick, et al. \"Safe latent diffusion: Mitigating inappropriate degeneration in diffusion models.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2023." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Under benign, non-adversarial conditions, fine-tuning a text-to-image diffusion model on seemingly unrelated data can cause it to \"relearn\" concepts that were previously erased." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024unstable,\ntitle={Unstable Unlearning: The Hidden Risk of Concept Resurgence in Diffusion Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0OB3RVmTXE},\nnote={under review}\n}" }, "abstract": { "value": "Text-to-image diffusion models rely on massive, web-scale datasets. Training them from scratch is computationally expensive, and as a result, developers often prefer to make incremental updates to existing models. These updates often compose fine-tuning steps (to learn new concepts or improve model performance) with “unlearning” steps (to “forget” existing concepts, such as copyrighted data or the ability to generate explicit content). 
In this work, we demonstrate a critical and previously unknown vulnerability that arises in this paradigm: even under benign, non-adversarial conditions, fine-tuning a text-to-image diffusion model on seemingly unrelated images can cause it to “relearn” concepts that were previously “unlearned.” We comprehensively investigate the causes and scope of this phenomenon, which we term concept resurgence, by performing a series of experiments based on fine-tuning Stable Diffusion v1.4 alongside “mass concept erasure”, the current state of the art for unlearning in text-to-image diffusion models (Lu et al., 2024). Our findings underscore the fragility of composing incremental model updates, and raise new serious concerns about current approaches to ensuring the safety and alignment of text-to-image diffusion models." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "machine unlearning", "concept unlearning", "evaluation", "diffusion models", "text to image" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/fd6e9fdc54339d21606d457b90563464da09969a.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/dd8094a6b00800141a717810479877d8175ec940.zip" }, "title": { "value": "Unstable Unlearning: The Hidden Risk of Concept Resurgence in Diffusion Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0OTVNEm9N4
Rethinking Copyright Infringements In the Era Of Text-to-Image Generative Models
main
Active
evaluating copying;copyright;generative ai;text-to-image;ai art;law;interpretability;social impact
alignment, fairness, safety, privacy, and societal considerations
3;5;6;8
4;4;4;3
1;2;3;4
2;2;3;4
3;2;3;4
5.5
3.75
2.5
2.75
3
-0.800641
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How does ArtSavant perform when applied to more obscure or emerging artists whose styles may be less distinctive or well-known?\n2. The TagMatch method relies on zero-shot tagging with CLIP, which may not capture subtleties in artistic style. Have the authors considered evaluating the reliability of TagMatch across different art genres or complex styles, and could a more refined tagging approach improve interpretability and consistency?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper addresses a timely issue -- potential copyright infringements in text-to-image generation -- that bridges technical, legal, and ethical domains. \n2. ArtSavant’s combination of DeepMatch and TagMatch represents a thoughtful approach, with one method offering high accuracy and the other interpretability. This approach is likely beneficial for non-technical audiences, such as legal professionals and artists.\n3. The paper is well-grounded in legal discussions, positioning ArtSavant as a tool that can potentially support legal decision-making regarding style infringement." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces ArtSavant, a tool designed to assess artistic style copying in text-to-image generative models. Built on legal scholarship, ArtSavant combines two methods, DeepMatch (a neural classifier) and TagMatch (an interpretable tag-based approach), to detect unique artistic styles and assess whether they are replicated in generated images. An empirical study using ArtSavant indicates that around 20% of the artists in their dataset appear at risk of style copying by generative models, raising concerns for the need to protect artistic styles under copyright." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The use of a limited reference dataset (372 artists) could affect the generalizability of ArtSavant’s findings, especially for artists with less established styles. Expanding the dataset to include more diverse artistic styles could strengthen the conclusions.\n2. ArtSavant may struggle with assessing artists whose work doesn’t conform to traditional or well-known styles, limiting its broader applicability. It may inadvertently favor more mainstream artistic elements, possibly overlooking style copying for non-Western, niche, or experimental art styles.\n3. Although TagMatch aims to make the tool interpretable, the subjectivity inherent in artistic tagging could affect its reliability, especially in legal contexts. This may be partially addressed by improving tagging accuracy, as noted by the authors." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. In line 21, there should be a space between the method name and the next word. \n\n2. How many training examples from one artist are required to reliably detect the style of that single artist in DeepMatch? \n\n3. Do DeepMatch and TagMatch provide different predictions for certain examples? If so, in what situations does this occur, and what are the characteristics of these artworks that lead to differing predictions?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. TagMatch offers an interpretable method for identifying stylistic elements, making it particularly valuable in legal contexts where explainability is essential.\n\n2. The paper includes a comprehensive evaluation of the proposed methods, including both quantitative and human evaluation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces ArtSavant, an explainable classifier for identifying artistic style infringements in generated art. 
The proposed framework consists of DeepMatch, a black-box neural classifier, and TagMatch, an interpretable tag-based method, to quantify the uniqueness of an artist’s style and recognize if it appears in generated images. The central idea is that if an artist’s works are consistently recognizable, they contain a unique style that can be classified. The approach uses both holistic and analytic style comparisons. It combines CLIP embeddings and tagged stylistic elements to support style infringement claims in a legally relevant, interpretable manner." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. TagMatch relies on LLMs to generate concept vocabularies, which may limit its effectiveness for less-known artists whose stylistic elements may not be well-covered in pretraining data. Could you show how TagMatch performs on less-known artists? If there are some gaps between known and well-known artists, I am curious if there is way to enhance the vocabulary to better capture these unique styles?\n\n2. DeepMatch uses a back-box for detection. However, such black-box classifiers may pick up on spurious details rather than genuine stylistic features. For example, if an artist always includes a certain animal in his art works, DeepMatch might use this feature to classify the style. Could you provide some evidence that DeepMatch’s classification is based on broader stylistic elements instead of just this minor feature?\n\n3. The preliminary study uses DINO features, which might be limited in representing stylistic nuances. Could you explore using features that are specifically trained for style similarity [1] to compare with your method as a baseline? What is the pro and con for classifier based approach proposed in this paper and embedding based approach? \n\n\n4. The authors noted that a new artist could easily retrain the detector to include their works for the DeepMatch approach, as it’s quite efficient. 
However, I’m curious about the potential impact on performance. Does retraining lead to issues like catastrophic forgetting of previously learned styles? It would be interesting to see a case study where the existing classifier is expanded to include new artists, observing how this affects both new and original classifications.\n\n[1] Unsupervised Image Style Embeddings for Retrieval and Recognition Tasks" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Could you provide more quantitative and qualitative discussions for similarity-based vs. style based method\n2. would appreciate any further clarifications regarding my concerns about Weaknesses. And I am willing to raise my score if I find them convincing" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well-written and addresses a timely, important problem relevant to today’s AI and creative industries. The authors provide a solid combination of qualitative and quantitative results that contribute valuable insights into the field.\n2. Considering “style” as a central focus is an innovative approach. 
By shifting from image-wise similarity detection to a style-based classification specific to individual artists, the paper redefines the task in a way that offers a deeper understanding of style infringement.\n3. The paper also emphasizes interpretability through the TagMatch method, which is especially useful in legal contexts, where clarity on how stylistic similarities are identified can support arguments around style infringement." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores a significant question of how GenAI might infringe upon the styles of individual artists and if legal frameworks could protect these styles. In particular, the author developed a tool, ArtSavant, to measure and quantify artistic style infringement. ArtSavant mainly utilizes two methods: \n* DeepMatch: aneural network classifier to establish a recognizable \"signature\" for an artist's style based on images. \n* TagMatch: An interpretable, tag-based method, which decomposes artworks into stylistic elements or \"tags\".\n\nTheir empirical results show that GenAI models have the potential to reproduce unique artistic styles, rasing copyright concerns." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Although I enjoyed reading this paper and find “style” to be an intriguing approach to this problem, I am concerned about the inherent ambiguity surrounding this concept. The paper assumes that “style” can be quantitatively defined and detected, yet style is fundamentally a qualitative and fluid concept, often shaped by subjective interpretation. Additionally, even in the real world, many artists have very similar “styles,” which complicates the notion of unique stylistic signatures.\n\n2. I wonder how a similarity-based method would perform on this dataset (please correct me if I missed this comparison in the paper). 
Are there cases where the style-based method detects something that a similarity-based method does not, or vice versa? A direct comparison could provide clearer insights into the advantages and limitations of each approach.\n\n3. Regarding TagMatch, I understand its goal of enhancing interpretability; however, I find it somewhat limited in scope. First, it’s a relatively naive approach in some respects, relying solely on zero-shot CLIP with predefined tags. Second, “style” implies something more subtle and nuanced than broad artistic categories. Even within the same category, there can be vast differences between artworks, so I’m unsure of TagMatch’s practical utility in capturing the deeper, unique aspects of an artist’s style." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Is the goal of the paper to create an automated system to make quick judgements about style infringement without requiring humans to look?\n\nDoes your goal contradict Sobel's view that these judgments are not possible to articulate in clear categories, and that they are inherently the province of a human jury to make?\n\nTo what extent do you believe that your system matches the style-infringement judgements that a jury would make \"by hand\"?\n\nWhat kinds of failure cases does the system have? 
What patterns characterize these failures?\n\nIf another scientist wishes to improve upon your system, what measurement can they do, that would indicate that they have made an improved system?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The problem area is important, with image synthesis models creating potentially huge economic impacts for the artistic professions. There is a need for scientific analysis to help guide discussions about implications for copyright and copyright law. Quantifying the amount of style imitation in the large models is a worthy goal. And in developing its methods, the paper recognizes the importance of interpretable explanations when it comes to human arguments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper aims to develop an “intuitive, automatic, legally-grounded” approach to determine style infringement. To do this, it trains two classifiers: one “DeepMatch,” a traditional image classifier trained to classify 372 artists based on a WikiArt training set, and a second, “TagMatch,” which classifies artist styles using a human-interpretable tag-matching heuristic on top of more than 100 CLIP-derived tags across 10 aspects of artistic styles. Finally it conducts a measurement of images generated by popular diffusion models to quantify the number that resemble an artist’s style according to DeepMatch, and generates some explanations using TagMatch." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper is not ready for publication in ICLR. There are several specific problems\n\n1. The legal discussion is not well-supported, and it is not sufficiently pedagogical for this audience.\n2. 
The suitability of the classifiers for the task is not sufficiently measured or justified or compared to previous technical work.\n3. The evaluation of style imitation in diffusion models does not support its conclusions.\n\nThe legal discussion is the most serious problem. For the technical audience at ICLR, a paper discussing legal issues must play a tutorial role. The audience at the conference is technical and not made of legal experts, so when making claims about legal issues, it is especially important for information about legal reasoning to be representative, explanatory, and correct. In contrast, this paper seems to be advancing an adventurous legal viewpoint in a venue with a legally unsophisticated audience.\n\nSpecifically, in the section on the legal framework, the paper puts in boldface: “**the task of showing the existence and uniqueness of artistic styles can be reduced to classification** – something deep networks are particularly adept at doing.” That claim appears to contradict the paper’s own legal citations. For example, when contemplating legal tests for style infringement, the cited work by Sobel makes a distinction between “extrinsic” similarity that can be described in words as opposed to an “intrinsic similarity” which is perceived by humans but not easily described in a mechanical way. Sobel illustrates the distinction by surveying many subtle situations that have appeared in case law. In the perspective provided by Adobe’s proposed style infringement legislation, the test is not pinned on the output, but rather, the intent of the AI user is placed at the center of style infringement. Both of these legal perspectives seem to be at odds with paper’s proposed reduction of the style infringement test to an automated and mechanical artist-identification image classification problem. 
Neither of these central legal issues are surfaced to the ICLR reader: the paper omits any contemplation, measurement, or comparison to the intrinsic judgements that would need to be made by a jury, nor does it make any measurement, prediction, or discussion of intent by the user of the AI.\n\nThis reviewer strongly feels that ICLR should not be the place to advance a new legal theory. Plenty of scientific questions arise in the legal discussions, such as whether automated methods might be able to anticipate the judgement of a jury (and if not, why not), or whether the intent of the user can be correctly guessed by an automated method. At ICLR it would be more appropriate for the paper to pose and investigate a scientific question, and should not lead with a novel legal theory in boldface.\n\n\n\nOn the suitability of the classifier. More careful comparisons to previous work are needed. Several previous works have focused on the style classification such as Karayev and van Noord cited in footnote 2. However, the current work does not attempt to compare its approaches to any previous approaches, and it does not build on top of any of the evaluation approaches. For example van Noord takes on artist identification using the “Rijksmuseum Challenge” and analyzes and breaks down failed predictions. Do the proposed classifiers work better or worse than van Noord? Do they fail in similar ways? What is different in the technical approach that might lead us to expect the classifiers are more suitable? Another insufficient comparison is between TagMatch and Concept Bottleneck Models. Table 1 in the appendix does a single pair of comparisons but does not quantify the sparsity advantage of TagMatch, or give any systematic comparison of meaningfulness to humans. The heuristic in TagMatch seems ad-hoc: why would we expect its sparse set of labels to be more meaningful to a jury than the ones provided by CBM? 
No evaluation on that is done.\n\n\n\nOn the evaluation of existing style copying. The paper’s conclusions are not sufficiently supported. The paper’s analysis of output from Stable Diffusion and OpenJourney concludes that most of the artist styles are not copied accurately, identifying just 16 of 372 artists whose styles are copied strongly. However, no triangulation is done on this measurement, so it is unclear whether the low rate of identification is due to a weakness in the classifier, or whether it is due to a lack of style-imitation in the diffusion models. A human evaluation on generated art style imitation could be done to check the estimate. Or larger-scale data resources could be used, for example, the “parrotzone.art” project has identified several thousand styles that SD copies well, and these could potentially be used as an independent source of human assessment of style similarity." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Rethinking how we define artistic style and characterize style infringement, with an efficient and interpretable quantitative framework based in legal scholarship" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024rethinking,\ntitle={Rethinking Copyright Infringements In the Era Of Text-to-Image Generative Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0OTVNEm9N4},\nnote={under review}\n}" }, "abstract": { "value": "The advent of text-to-image generative models has led artists to worry that their individual styles may be copied, creating a pressing need to reconsider the lack of protection for artistic styles under copyright law. This requires answering challenging questions, like what defines style and what constitutes style infringment. 
In this work, we build on prior legal scholarship to develop an automatic and interpretable framework to \\emph{quantitatively} assess style infringement. Our methods hinge on a simple logical argument: if an artist's works can consistently be recognized as their own, then they have a unique style. Based on this argument, we introduce ArtSavant, a practical (i.e., efficient and easy to understand) tool to (i) determine the unique style of an artist by comparing it to a reference corpus of works from hundreds of artists, and (ii) recognize if the identified style reappears in generated images. We then apply ArtSavant in an empirical study to quantify the prevalence of artistic style copying across 3 popular text-to-image generative models, finding that under simple prompting, $20\\%$ of $372$ prolific artists studied appear to have their styles be at risk of copying by today's generative models. Our findings show that prior legal arguments can be operationalized in quantitative ways, towards more nuanced examination of the issue of artistic style infringements." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "evaluating copying", "copyright", "generative ai", "text-to-image", "ai art", "law", "interpretability", "social impact" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/57843966202f26439d2fd0b68a0434341a8b953c.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Rethinking Copyright Infringements In the Era Of Text-to-Image Generative Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0OzDMjPHa3
Efficient Visualization of Implicit Neural Representations via Weight Matrix Analysis
main
Active
Implicit neural representation;pruning;visualization;adaptive mesh refinement
other topics in machine learning (i.e., none of the above)
3;3;3;5
5;3;3;3
1;2;2;2
2;2;1;3
2;3;1;2
3.5
3.5
1.75
2
2
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1 Terminology: Key terms such as \"domain\" and \"adaptive mesh\" need clearer definitions. Are there specific examples or illustrations that could be added?\n\n2 ID pruning\n\n2.1 What is the computational cost of ID? \n\n2.2 Why is the number of samples set to the width of the INR layers? \n\n2.3 How does this hyper-parameter impact the final performance? \n\n\n3 There is no detailed discussion on the computational costs of the algorithm.\n\n4 The paper lists multiple hyperparameters but does not explain how they were chosen or their impact on the algorithm’s performance.\n\n5 Including comparisons with state-of-the-art INR visualization methods or adaptive algorithms would deepen the insights and show the algorithm's standing in the broader research landscape.\n\n6 Expanding experiments to larger datasets would better illustrate the scalability and robustness of the approach." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The target problem, reducing cost in visualizing INRs, is meaningful. \n2. The qualitative results show some improvement compared to AMR." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a new algorithm for visualizing implicit neural representations (INRs) through a pruning-based approach. The method determines the high-detail regions in pre-trained INRs and then uses adaptive mesh refinement to split up the domain, thus saving memory. The results show that the proposed algorithm can achieve comparable visualization accuracy while using fewer degrees of freedom than uniform grid discretization or basic AMR. However, the contribution is incremental, the presentation requires improvement, and the experimental section is weak." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The novelty is unclear. The paper combines established techniques without adequately explaining the challenges or novel solutions provided.\n\n2. The experiment section is weak. \nThe experimental results are limited; more comparisons with advanced visualization techniques for INRs would strengthen the evaluation.\n\n3. The paper's clarity and organization could be significantly improved." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In this paper, authors propose a hypothesis that *the less detailed a function is on a region of the domain, the smaller an INR needs to be to accurately describe the function in that region*. Is there any verification on this hypothesis? For example, the relationship between function details and the INR size across different levels of detailed regions. The authors could provide some empirical results on the appendix. It's important to give a comprehensive verification since it is the foundation of the whole paper." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper proposes a dynamic adaptation method for INR visualization that keeps high resolution in high-detailed region while reducing resolution in low-detailed region. This is efficient in memory saving, especially useful in large scale 3D/4D data visualization.\n2. The combination of AMR and ID for variable resolution is interesting. It saves computational resources by avoiding computation on low-detailed region reconstruction.\n3. It shows great potential in real-world application by the experiments in the CT dataset." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents an efficient method for visualizing Implicit Neural Representations (INRs) by adaptively refining only high-detail regions, identified through pruning of weight matrices. This approach maintains visualization quality with reduced memory use, as it avoids uniform discretization. 
Tests on CT scan data show it can achieve detailed visualization while significantly lowering computational demands, making it ideal for large-scale, dynamic data" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This paper lacks of theoretical analysis on how AMR and ID succeed in high-detail INR model visualization. Since AMR and ID have been widely studied and well developed. The combination is not a novel enough approach to this problem. Here are some points that I think important to analyze:\n - It is necessary to explain how important information is preserved when ID pruning in INR, especially in high-detailed regions. Since the representational capacity of INR is directly related to the size of the weight matrix, a sufficient analysis on how ID pruning affects the reconstruction accuracy of INR is important.\n - Pruning can impact the local approximation accuracy of the INR model, so it’s essential to analyze whether sufficient details can still be retained after pruning at various mesh resolutions. This aspect could be supported by a quantitative analysis on the relationship between pruning rate and error in different levels of mesh resolutions.\n - AMR relies on local error criteria, but ID pruning may reduce reconstruction accuracy in certain regions, potentially missing some details if not properly controlled. Therefore, it is necessary to analyze the impact of pruning on AMR’s local error estimation.\n1. This paper gives a preliminary experiment and 2 CT experiments. The datasets are simple, not enough to support the efficiency of their algorithm. I suggest doing experiments on some medical CT datasets, e.g. [LUNA16](https://luna16.grand-challenge.org/Data/).\n1. The authors show the influence of the hyperparameters to the results in their experiments, but this discussion is not enough. 
Across the 3 experiments, the hyperparameter $T$ varies from $10^{-4}$ to $10^{-1}$, $\\epsilon$ varies from $10^{-3}$ to $10^{-2}$. The range is too big for users to find a set of useful settings. Are there any guiding rules on how to choose the hyperparameters with respect to the dataset? It's also unclear to me if the choice of $T$ and $\\epsilon$ affects each other. I would suggest a more comprehensive ablation study on the choice of the hyperparameters $T$, $P$, and $\\epsilon$.\n1. This algorithm uses ID iteratively. I wonder if the computational cost will increase exponentially when it comes to the high dimensional dataset or large scale dataset? I suggest the authors giving a time complexity analysis with respect to the dataset scale and the dimensionality. The authors could also provide the runtime results on larger datasets (e.g. [LUNA16](https://luna16.grand-challenge.org/Data/)) if possible.\n\nBesides, there are some minor issues:\n\n5. There is a misspelling in the last sentence of **INPUT** in algorithm1, it should be \"to\" instead of \"ot\"\n6. In algorithm1, the confition of the second for loop says M.E.done_refining == False, but I can't find anywhere that sets it false in the algorithm.\n7. There are too many long sentences that take up to 3 lines. I would suggest breaking them down for reading." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- How does the proposed adaptive mesh generation from INRs compare with other data structures traditionally used for storing high-resolution data?\n\n- Why was the \"Basic\" approach chosen as the baseline?\n\n- What is the motivation for selecting pruning as the primary optimization technique, specifically for adaptive meshing of INRs?\n\n- How do the storage requirements of INRs compare to those of the generated adaptive mesh, and could a comparative analysis be provided? And generally how do drectly visualizing the INR compare to the adaptive mesh?\n\n- Would the authors provide a time-based evaluation comparing the efficiency of pruning-based adaptive mesh refinement against the Basic approach for mesh construction?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper presents a compelling approach by incorporating pruning techniques into the generation of adaptive meshes based on implicit neural representations (INRs). This is an innovative idea that effectively leverages the strengths of INRs, making the visualization process more efficient and resource-conscious. The introduction of a method to visualize INRs adaptively addresses an important gap, and it highlights the potential of INRs to be used more broadly and effectively in high-resolution data applications. This direction holds promise and warrants further exploration to fully harness the benefits of INRs in visualization and beyond." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the challenge of efficiently visualizing implicit neural representations (INRs), which are well-suited for storing high-resolution data like time-varying volumetric data. Traditional approaches typically discretize INRs to a uniform grid, leading to a loss of the inherent advantages of INRs. To tackle this, the paper introduces an algorithm that generates an adaptive mesh by pruning the weight matrices of the INR. The key insight is that areas with low variation in the INR can tolerate more aggressive pruning than highly variable regions, enabling the mesh to be refined and adapted. This approach aims to maintain the INR's resource efficiency, even in visualization contexts." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper introduces an innovative idea with significant potential, and the research direction it proposes opens exciting new avenues for leveraging INRs directly during visualization. However, despite these strengths, the paper does not feel fully 'finalized' for publication. There are several areas that would benefit from further development to strengthen its contribution. For details see below.\n\nFirst, although the adaptive mesh generation from INRs is well-motivated, alternative data structures commonly used to store high-resolution data are not evaluated, and comparisons with these could provide additional insights. Additionally, the term 'visualization' may be somewhat misleading, as the method centers on adaptive mesh generation rather than actual rendering of INRs, and lacks a concrete approach for efficient visualization. 
Please consider defining the term \"visualization\" in your application more concretely.\n\nThe choice of 'Basic' as a baseline is also not well-justified, and the high-level presentation of the methodology makes it challenging to fully understand the workings of the approach. Within the paper I only found a short paragraph describing the \"Basic\" algorithm (l. 230-235). A more detailed description, also describing the motivation of why the authors chose this baseline would help the paper.\n\nWhile the use of pruning in adaptive mesh generation is interesting, the paper could benefit from a stronger motivation for choosing pruning specifically as an optimization technique. Could the authors provide more explanation or justification for their choice of pruning as an optimization technique?\n\nFurthermore, an analysis of storage requirements for INRs versus the adaptive mesh is missing; comparing these could provide an insightful 'upper baseline' for memory efficiency. Since INRs can be directly rendered by multiple function evaluations (albeit slowly), it would be valuable to include a performance evaluation of this approach in comparison to the proposed method, especially in the context of interactive visualization. Finally, a time-based evaluation (e.g., comparing pruning-based adaptive mesh refinement versus the Basic approach in mesh construction) would give a more comprehensive view of the method’s efficiency." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* The concept of mesh is a little bit misleading in the paper in my opinion. In the context of INRs, mesh is used to denote a surface represented by a triangle mesh. I think the correct term the paper should use is grid. That would solve other derived term problems. For example, an adaptive mesh is a concept established in Computer Graphics for decades, meaning a triangle mesh that may be subdivided or simplified as needed.\n\n* How the coarse uniform mesh is extracted from the INR in the proposed approach?\n\n* As algorithm 1 runs, there will be different versions of the INR matrices? Each pruning operation results in different layer matrices and the version used depends on which part of the domain is being evaluated.\n\n* I need more details about how the pruning is applied in the algorithm in practice. $\\bar{U} := UT^T$ contains the complexity that was pruned from $W$ and $b$. In other words, the pruned parameters are moved to the next layer. However, all layers should be evaluated when the INR is evaluated, thus the computation complexity still the same in the end. Probably there is an additional step to disconsider $\\bar{U}$ that I did not find in the text.\n\n* The meaning of ID_samples seems confusing. The paper first states that it is the number of samples in the domain to take when computing the interpolation decomposition (Table 1). 
However, in Algorithm 1 ID_samples seems to be the number of neurons to use for pruning.\n\n* I would like to know the wall time to compute the visualization and how it compares with the non-adaptive visualization.\n\n* Should use an usual metric to compare reconstruction (Chamfer or Hausdorff distance)" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "* I like the presentation. It is self-contained and properly presents the background. The paper is very easy to follow.\n\n* The core idea is simple and easy to implement." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a novel method for visualizing implicit neural representations (INRs) via an adaptive grid evaluation. The core idea is to prune the neural network using an interpolation decomposition." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The evaluation should follow established procedures in the field. The paper uses datasets that are not usual for evaluation of similar approaches. I recommend the authors to read related papers in detail and use the datasets commonly used in the field. Example datasets include Thingi10K, Stanford, etc.\n\n* The technical contribution is thin. The algorithm proposed may be considered incremental since the interpolative decomposition used is not proposed by the paper. In such a case, I would recommend the authors to focus on finding additional applications and to deeply evaluate the approach. That tasks would help to find additional properties of the representation that may be emphasized in future versions, increasing the manuscript value.\n\n* There are no comparisons with state-of-the-art.\n\n* The related works section is very thin. INRs is a gigantic area. 
I would advise the authors to start by checking this survey to find the papers they should cite. It is a little bit outdated now, but it is a good starting point. \n\n```\n@inproceedings{xie2022neural,\n title={Neural fields in visual computing and beyond},\n author={Xie, Yiheng and Takikawa, Towaki and Saito, Shunsuke and Litany, Or and Yan, Shiqin and Khan, Numair and Tombari, Federico and Tompkin, James and Sitzmann, Vincent and Sridhar, Srinath},\n booktitle={Computer Graphics Forum},\n volume={41},\n number={2},\n pages={641--676},\n year={2022},\n organization={Wiley Online Library}\n}\n```" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "From a pre-trained implicit neural representation with no access to its training data, we analyze the weight matrices to produce a variable resolution visualization with significant memory savings." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024efficient,\ntitle={Efficient Visualization of Implicit Neural Representations via Weight Matrix Analysis},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0OzDMjPHa3},\nnote={under review}\n}" }, "abstract": { "value": "An implicit neural representation (INR) is a neural network that approximates a function over space and possibly time. Memory-intensive visualization tasks, including modern 4D CT scanning methods, represent data natively as INRs. While such INRs are prized for being more memory-efficient than traditional data on a lattice, discretization to a regular grid is still required for many visualization tasks. We present an algorithm to store high-resolution voxel data only for regions with significant detail, reducing memory requirements. To identify these high-detail areas, we use an interpolative decomposition pruning method on the weight matrices of the INR. 
The information from pruning is used to guide adaptive mesh refinement, allowing automatic mesh generation, tailored to the underlying resolution of the function. From a pre-trained INR with no access to its training data, we produce a variable resolution visualization with significant memory savings." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Implicit neural representation", "pruning", "visualization", "adaptive mesh refinement" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/0f7e3f349278734d5011ee9384088949e288faaa.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Efficient Visualization of Implicit Neural Representations via Weight Matrix Analysis" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0PC9goPpuz
Compatibility-aware Single-cell Continual Annotation
main
Withdraw
Continual Compatible learning; Single-Cell RNA-seq data
applications to physical sciences (physics, chemistry, biology, etc.)
Yuyao Zhai;Liang Chen;Minghua Deng
~Yuyao_Zhai1;~Liang_Chen5;~Minghua_Deng2
3;3;5
4;4;4
2;3;2
1;2;2
2;3;3
3.666667
4
2.333333
1.666667
2.666667
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the Weaknesses section." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. 
The paper presents a novel framework dubbed scROD that combines sample replay and objective decomposition, addressing the critical issue of catastrophic forgetting in continual learning scenarios. scROD effectively balances the model's ability to retain old knowledge (stability) and adapt to new tasks (plasticity), which is crucial for continual learning systems.\n\n2. The paper evaluates scROD on a variety of benchmarks, including intra-tissue, inter-tissue, and inter-data scenarios, demonstrating its robustness across different annotation challenges. Besides, scROD outperforms existing state-of-the-art methods in scRNA-seq annotation, showing significant improvements in both old and new task accuracies.\n\n3. The article presents its findings with clear and concise figures and tables." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces scROD, a method for continual compatible learning in the context of single-cell RNA sequencing (scRNA-seq) data annotation. scROD employs a combination of sample replay and objective decomposition to address the challenge of catastrophic forgetting, where models typically lose performance on old tasks after learning new ones. By maintaining a memory buffer to store samples from previous tasks and replaying them alongside new data, scROD balances the retention of old knowledge with the acquisition of new information. Furthermore, it decomposes the training objectives into new/old cell type distinction and new cell type distinction, assigning different weights to these objectives to achieve a better trade-off between model stability and plasticity. This approach allows scROD to continuously learn and annotate new cell types over time without forgetting previously learned ones, demonstrating effectiveness through comprehensive experiments on various benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The innovation of this paper is quite common in continual learning, where many methods use replay buffer approaches to tackle catastrophic forgetting (e.g., [R1][R2][R3]). This paper does not show significant differences from those methods or specific distinctions for RNA data.\n\n2. The novelty of objective decomposition is intuitive and easy to understand, which leverage two parameters \\alpha_1 and \\alpha_2 to balance the optimization objectives.\n\n3. The experimental analysis of two learning objectives is trivial, since derivation of Eq. 7 is intuitive. Besides, it seems like that only using L_cur leads to better performance on previous tasks than L_pre, as shown in Figure 3. Thus, why do not just leveraging L_cur instead of L_pre?\n\n4. Although ScROD achieves SOTA performance in various settings, Tables 1, 2 and 3 show that ScROD achieves only a little bit higher than Replay, which is not the compelling evidence of the effectiveness of ScROD.\n\n5. In Figure 5, the first two experiments were performed on inter-data benchmark , and the last two on inter-tissue benchmark. Why not using the same benchmark for all the ablation study?\n\n\n[R1] Maracani, A., Michieli, U., Toldo, M., & Zanuttigh, P. (2021). Recall: Replay-based continual learning in semantic segmentation. In Proceedings of the IEEE/CVF international conference on computer vision (pp. 7026-7035).\n\n[R2] Chaudhry, A., Rohrbach, M., Elhoseiny, M., Ajanthan, T., Dokania, P. K., Torr, P. H., & Ranzato, M. A. (2019). On tiny episodic memories in continual learning. arXiv preprint arXiv:1902.10486.\n\n[R3] Riemer, M., Cases, I., Ajemian, R., Liu, M., Rish, I., Tu, Y., & Tesauro, G. (2018). Learning to learn without forgetting by maximizing transfer and minimizing interference. arXiv preprint arXiv:1810.11910." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How is scROD different from GEM (Gradient Episodic Memory, Lopez-Paz, D., & Ranzato, M. A., 2017)? Can authors repurpose GEM and compare it with scROD? There are several follow ups for GEM for example \"Adaptive Memory Replay for Continual Learning\" from James et. al 2024 and \"MGSER-SAM: Memory-Guided Soft Experience Replay with Sharpness-Aware Optimization for Enhanced Continual Learning\" from Li et. al 2024 that could be compared against? Can authors theoretically and experimentally compare scROD with these approaches?\n2. Since the manuscript tackles annotating scRNA-seq datasets, are there any practical limitations of this approach, can this be directly deployed by medical practitioners? There already exist many methods for annotation which do no assume access to supervision on query datasets, how did the authors consider online setting relevant to scRNA-seq dataset annotation? Is it possible to get small labeled samples on a query dataset? \n3. Can you make accurate biological inferences from this method? Is it possible to identify genes which cause classification to a particular cell-type?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. 
The paper is clearly written and the experiments are clearly presented to back the claims made by the authors.\n2. The problem is interesting since many single-cell RNA datasets have come up in the past few years. Online learning or transfer learning which ensures that the same network/fine-tuned networks can successfully annotate new data would be a strong contribution to the scientific community." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Authors propose an online learning approach scROD to annotate single cell RNA seq data. scROD uses a memory buffer and a new loss function to preserve classification performance on the past data while being able to annotate newly acquired data at the same time. Authors compare with several baselines and existing methods demonstrating the improvements in continual single-cell annotation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The idea to utilize a memory buffer is a widely-used idea in the reinforcement learning literature (Deep Q-Networks) and even continual learning literature (Gradient Episodic Memory). Therefore the core contribution is not technically novel. There is some novelty to decompose the loss function and consider the impact of different loss functions on the catastrophic forgetting issue in this setting but the results are fairly obvious. For example, when we are training on new datasets, we should ensure class balancing to ensure no classes are compromised which can be achieved with weighing loss function or sampling per class.\n2. There are no past methods that specifically target the problem of continual learning but rather consider query and reference datasets, which I believe is a much harder problem with no supervision available on a query dataset. Therefore comparison with these methods is good to have but unfair to evaluate the utility of scROD." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. A thorough investigation of the continual cell type annotation task.\n2. Provide comprehensive experimental benchmarks for the proposed method and baselines." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents scROD, a method designed to address the challenge of updating automatic cell type annotation models in single-cell RNA-seq (scRNA-seq) data while preventing catastrophic forgetting, where the model's performance on previously learned tasks deteriorates after learning new tasks. To tackle this, the authors introduce the concept of continual compatible learning, which emphasizes maintaining stability on old tasks while adapting to new ones. The proposed scROD method leverages sample replay by using a memory buffer to retain cells from earlier tasks, allowing the model to learn these alongside new tasks. It also separates two training objectives: distinguishing new cell types from old ones and differentiating between newly introduced cell types. 
By assigning distinct weights to these objectives, scROD achieves a balance between stability and adaptability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This manuscript focuses on one task: continual cell-type annotation. My main concern is that the importance of such continual annotation might not be very high. There exists some large-scale atlas and databases covering different species, like Human Cell Atlas, CELLxGENE, Mouse Cell Atlas, Zebrahub, and so on. Those resources cover a large range of tissues and provide cell type annotations. Some of them also curate the annotations with Cell Ontology. In most cases, a simple model pretrained on some atlas, such as CellTypist[1], can handle the annotation of unseen data. Can the authors provide concrete examples of scenarios where continual learning would be necessary or advantageous? What are the limitations of current approaches that continual learning specifically addresses?\n2. Related to bullet 1, currently all the experiments only focus on continual cell type annotation. Have the authors considered evaluating their method on vanilla annotation or zero-/few-shot annotation tasks? How might the proposed method's performance compare to existing methods in these scenarios?\n3. According to the experimental results, the performance of the proposed method is not always better than the baselines. If so, what factors contribute to these performance differences? How do the computational requirements of scROD compare to the baselines?\n\n[1] Domínguez Conde, C., et al. \"Cross-tissue immune cell analysis reveals tissue-specific features in humans.\" *Science* 376.6594 (2022): eabl5197." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@misc{\nzhai2024compatibilityaware,\ntitle={Compatibility-aware Single-cell Continual Annotation},\nauthor={Yuyao Zhai and Liang Chen and Minghua Deng},\nyear={2024},\nurl={https://openreview.net/forum?id=0PC9goPpuz}\n}" }, "abstract": { "value": "As massive well-labeled single-cell RNA-seq (scRNA-seq) data are available sequentially, automatic cell type annotation systems would require the model to continuously update to expand their internal cell type library. However, the model could suffer from the catastrophic forgetting phenomenon, in which the performance of the model on the old tasks degrades significantly after it learns a new task. To enable the smooth upgrading of the system, the model must possess the ability to maintain performance on old tasks (stability) and adapt itself to learn new tasks (plasticity). We call such an updating process continual compatible learning. To adapt to this task, we propose a simple yet effective method termed scROD based on sample replay and objective decomposition. Specifically, we first maintain a memory buffer to save some cells from the previous tasks and replay them to learn together with the next incoming tasks. Then we decompose two different training objectives in continual compatible learning, i.e., distinguishing new cell types from old ones and distinguishing between different new ones, to avoid forgetting the model to varying degrees. Lastly, we assign distinct weights for two objectives to obtain a better trade-off between model stability and plasticity than the coupled approach. Comprehensive experiments on various benchmarks show that scROD can outperform existing scRNA-seq annotation methods and learn many cell types continually over a long period." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": { "value": [ "~Yuyao_Zhai1", "~Liang_Chen5", "~Minghua_Deng2" ] }, "authors": { "value": [ "Yuyao Zhai", "Liang Chen", "Minghua Deng" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Continual Compatible learning; Single-Cell RNA-seq data" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "zhai|compatibilityaware_singlecell_continual_annotation" }, "pdf": { "value": "/pdf/d2f8cb4c3925fd4538249805e99719f43dd9cc36.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Compatibility-aware Single-cell Continual Annotation" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0PcJAHbSmc
DrivingRecon: Large 4D Gaussian Reconstruction Model For Autonomous Driving
main
Active
4D Gaussian Reconstruction; Autonomous Driving
applications to robotics, autonomy, planning
5;6;6
3;4;4
3;3;3
2;3;2
2;2;3
5.666667
3.666667
3
2.333333
2.333333
1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "**Regarding efficiency and comparison with 3DGS**\n\nWhat is the computational cost to train the model (how many hours on 24 GPUs)? \nHow long does it take to reconstruct a 3D scene representation using your approach during inference? How does the efficiency compare to 3DGS, e.g., StreetGaussian on 256x512?\n\nHow does the realism compare to 3DGS (e.g., StreetGaussian at 256 × 512)? It's okay if it's worse; I'm just curious. \n\n\n\n**On 3D labels**\nWhat is the performance without using 3D bounding boxes at all? I note that you use 3D bounding boxes as prompts for SAM. A label-free approach would make this work more impactful.\n\n**On downstream applications**\nHow is UniAD implemented in Waymo? Would it be possible to conduct your experiments on nuScenes to follow the setting/implementation of UniAD?\n\n**Miscellaneous**:\n* How many frames are in the input during training?\n* In Table 4b, what does \"Training Num\" refer to? Do you mean number of scenes? The PSNR seems quite high compared to Table 3.\n\nSome questions may require additional experiments; please disregard if they're not feasible. However, I'm particularly interested in the efficiency and comparison with 3DGS." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. A generalizable and scalable approach that allows training of large models to learn priors from extensive data, generalizing to novel scenes.\n2. **Almost** no 3D bounding box labels required for dynamic scenes, enhancing scalability.\n3. Detailed explanations and extensive experiments on cross-data evaluation, downstream applications (data augmentation, pretrained model for perception, scene editing)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Unlike previous methods (e.g., 3DGS/NeRF) that require thousands of iterations to reconstruct a scene, this work aims to predict a 3D scene representation using a neural network.\n\n The authors make several design choices to make this pipeline work (PD-block, regularization, 3D encoding, etc.). \n\nExperiments conducted on Waymo demonstrate better performance compared to other generalizable approaches." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Overcomplicated design:\n While I appreciate the effort in developing a generalizable model with dynamic-static decomposition, the model seems quite complex, requiring:\n * Multiple modules (image encoder-decoder, temporal cross-attention, Gaussian adapter, PD block, etc.)\n * Numerous regularization terms\n * Several pretrained models (DepthNet, DeepLab, SAM)\n\n This complexity may hinder downstream applications when used as a pretrained model. For instance, how fast is the model? Is it efficient enough for use in autonomy systems?\n\n2. The realism is still lower compared to optimization-based approaches (e.g., 3DGS), and can only operate on low resolution (256x512) with a limited number of images.\n\n3. 
(Minor point) The writing seems somewhat rushed, lacking thorough proofreading. Some potential issues:\n * L155, \"corresponding intrinsic parameter E\" should be K\n * L414 \"evaluation on NOTA-DS6\" should be Diversity-54" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "How does the scene edit (Fig.6) work? This procedure can be more detailed." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- It is a very early work that explores learning-based generalizable reconstruction methods for autonomous driving, demonstrating this paradigm could work in real-world driving scenarios.\n- This paper is comprehensive since it not only develops the methods but also incorporates potential applications such as perception and driving tasks.\n- The self-supervised pretraining task is insightful." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a learning-based reconstruction method in a feed-forward manner in driving scenarios. It could predict 4D Gaussian primitives from multi-view temporal input. It is a very early work that explores learning-based generalizable reconstruction and rendering for autonomous driving. 
This paper also introduces a couple of downstream applications such as model pre-training and vehicle adaptation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- This paper does not demonstrate the model's generalization to different viewpoints. The authors claim the ability of vehicle adaption. However, only the camera intrinsic is changed. Could the predicted 4D Gaussians produce good rendering quality in viewpoints beyond the driving trajectories (different extrinsic)? A recent work[1] explores this direction.\n\n- The resolution is relatively low. The produced rendering quality cannot meet the requirements of practical use, such as camera simulation.\n\n- It would be better to show the inference latency.\n\n- The authors do not provide video demonstrations of the rendering results. It is hard to have a intuitive understanding of the actual performance.\n\n[1] Freevs: generative view synthesis on free driving trajectory." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1.What does “DA-Block” in line 202 refer to? It is not mentioned in the context.\n\n2.Please refer to the questions and suggestions in the Weaknesses part." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1.This paper first explores a feed-forward 4D reconstruction method for surround-view driving scenes, which promotes the development of feed-forward technology in the field of 4D reconstruction.\n\n2.The proposed PD-Block learns to prune and dilate the Gaussian points and allows for Gaussian points that are not strictly pixel-aligned, which is innovative." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a feed-forward 4D reconstruction method that generates 4D scenes from surround-view video inputs in a single feed-forward pass.\nThe method involves 3D Position Encoding, Temporal Cross Attention, Gaussian Adapter, and Prune and Dilate Block. All these modules consist of the feed-forward 4D reconstruction pipeline. \nThe PD-Block learns to prune redundant Gaussian points from different views and background regions and dilate Gaussian points for complex objects, enhancing the quality of reconstruction.\nThis paper also presents rendering strategies for both static and dynamic components, enabling efficient supervision of rendered images across temporal sequences." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.The training process requires depth map ground truth, whereas comparison methods like Pixelsplat and MVSpalt can be trained without it. This reliance on depth ground truth during training restricts its practical applicability.\n\n2.The dynamic objects are decomposed through segmentation and have only few categories (vehicles and people). 
This approach only separates dynamic and static pixels based on semantics, limiting its ability to achieve comprehensive 4D reconstruction of all dynamic objects.\n\n3.Compared to scene-optimized methods, feed-forward reconstruction provides the advantage of generalization, eliminating the need of test-time optimization for each new scene (though it may lead to some decrease in accuracy compared to the scene-optimized method). In the papers of comparing methods MVSplat and PixelSplat, both of them present running time and memory consumption, demonstrating the efficiency of their feed-forward approaches. However, in this paper, while the authors claim their method is feed-forward, they do not provide an analysis of its running time and memory usage. I recommend including this efficiency analysis and comparing it with other methods to strengthen the evaluation. \n\nBesides, if the authors believe that efficiency is not a concern of this paper, then comparisons with other offline scene-optimized methods (e.g., DrivingGaussian) should be included.\n\n4.If the possible application is to develop real-world simulators in autonomous driving (mentioned in the abstract of the paper), then there is no high requirement for the efficiency of reconstruction, and the existing offline scene-optimized 4D reconstruction method is also acceptable. However, feed-forward does not seem to have an advantage in terms of reconstruction accuracy." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024drivingrecon,\ntitle={DrivingRecon: Large 4D Gaussian Reconstruction Model For Autonomous Driving},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0PcJAHbSmc},\nnote={under review}\n}" }, "abstract": { "value": "Photorealistic 4D reconstruction of street scenes is essential for developing real-world simulators in autonomous driving. However, most existing methods perform this task offline and rely on time-consuming iterative processes, limiting their practical applications. To this end, we introduce the Large 4D Gaussian Reconstruction Model (DrivingRecon), a generalizable driving scene reconstruction model, which directly predicts 4D Gaussian from surround-view videos. To better integrate the surround-view images, the Prune and Dilate Block (PD-Block) is proposed to eliminate overlapping Gaussian points between adjacent views and remove redundant background points. \nTo enhance cross-temporal information, dynamic and static decoupling is tailored to learn geometry and motion features better. Experimental results demonstrate that DrivingRecon significantly improves scene reconstruction quality and novel view synthesis compared to existing methods. Furthermore, we explore applications of DrivingRecon in model pre-training, vehicle adaptation, and scene editing. Our code will be made publicly available." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "4D Gaussian Reconstruction; Autonomous Driving" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/c9990ab223bbe1ff44e3436e307093ea24c62ad9.pdf" }, "presentation": null, "primary_area": { "value": "applications to robotics, autonomy, planning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "DrivingRecon: Large 4D Gaussian Reconstruction Model For Autonomous Driving" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0PxLpVURTl
MIM-Refiner: A Contrastive Learning Boost from Intermediate Pre-Trained Masked Image Modeling Representations
main
Active
self-supervised learning;masked image modeling;instance discrimination;computer vision;contrastive learning
unsupervised, self-supervised, semi-supervised, and supervised representation learning
6;6;6;8
4;4;4;4
3;3;3;3
2;3;3;3
3;3;3;4
6.5
4
3
2.75
3.25
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weakness. I believe a clear description of the method and experimental setup is one of the most important things when writing a paper (weakness 1).\n\nAdditional question: what does the “relative” in Figure 3(d) mean? Does the value calculated by the performance of ( the i+1 th layer - the i-th layer)?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper first points out the influence of the lightweight decoder on the feature learning of the encoder in MIM methods.\n2. The analyzing part is well-written." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper focuses on bridging the gap between large MIM pre-trained models and SOTA methods. The paper first discovers that MIM models have different types of blocks: those that mainly improve the pre-training objective and others that are responsible for abstraction. Then, the paper proposes a method MIM-Refiner, which adds Instance Discriminator heads on pre-trained MIM models for refinement. The ID heads exploit the intermediate representations to consistently improve the performance of MIM pretrained models. 
While the performance gains on large dataset full-finetuning are small, the proposed methods show remarkable gains on few-shot settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The description of the method and experimental setup needs to be clarified. (a) Which blocks need to be fine-tuned during refinement, or do all blocks need to be fine-tuned? (b) How many epochs are needed to refine different models? (c) What is the structure of the ID head? Answers to all these questions should be contained in the manuscript.\n2. Unfair comparison. The paper misses an important baseline - train the original model with 0 heads with the same epochs to demonstrate the importance of refinement (instead of just training more epochs).\n3. Some typos. L267-269, see Table 1 instead of Figure 3b." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to weakness." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper proposes a detailed analysis of the blocks of MIM models in which different blocks extract features with a specific focus and the most efficient features learned by MIM are from the middle blocks.\n\n2. 
A contrastive learning-based method called MIM-Refiner is proposed to refine the representation of current MIM models by attaching the middle layers with ID objective. \n\n3. Experimental results show the effectiveness and generalization ability of MIM-Refiner on downstream tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a contrastive learning boosting method called MIM-Refiner to refine the features of pre-trained MIM models. MIM-Refiner leverages multiple instance discrimination heads (ID) which are connected to different immediate layers. Each ID head contains a contrastive loss that captures semantic information to improve the quality of learned representations. By training a few epochs, the features of MIM-Refiner surpass the current MIM models on multiple experiments: on ViT-H with data2vec 2.0 on ImageNet-1K, the accuracy of the proposed method reaches state-of-the-art performance on linear probing and low-shot classification." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. As the discussion of end-to-end training, the proposed method MIM-Refiner seems to be a two-step training method, with first step training MIM models and fine-tuning the updated models by incorporating ID heads to middle layers. Practically, this might increase the complexity of the training paradigm and deployment. Is it possible to improve the proposed method with end-to-end training on MIM and ID? If not, what are the potential bottlenecks to circumvent this goal?\n\n2. There is no overview diagram that shows the detailed architecture of MIM-Refiner or how the training diagram goes. The diagram in Figure 4 provides partial information but does not clearly illustrate these points." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "---\nSince D2V2 is used as a baseline, does the representation degradation issue also appear in the audio and language domains?\n\n---" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "---\n\n## **Strengths**\n\n- The paper is well-written, with clear observations, a well-developed motivation, a straightforward idea, a clearly-stated method, extensive experiments, and comprehensive analysis.\n \n- It effectively identifies the representational degradation phenomenon in large visual foundation models pre-trained with MIM self-supervised learning (SSL), providing evidence through multiple experiments.\n \n- The proposed method offers a simple and effective solution to prevent this issue and improve the representation quality of MIM SSL.\n\n- Rigorous experiments and analysis are conducted to show the success of the proposed method, with large improvements over current SOTA.\n\n---" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "---\n\n## **Summary**\n\nThe paper identifies the representation degradation issue in Masked Image Modeling (MIM)-pretrained large foundation models. 
To address this, the authors propose a simple yet effective method to prevent degradation and further improve the representation quality of MIM methods by adding auxiliary contrastive losses to the last layers of Vision Transformers (ViTs) on top of the MIM objective. The paper provides improved performances with large margins over current state-of-the-art (SOTA) methods through extensive experiments and rigorous analysis, demonstrating the success of the proposed approach.\n\n---" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "---\n### **Limitations**\n\n1. To prevent representation quality degradation in the last layers of ViTs, the authors experiment with contrastive loss, which requires constructing a queue/pool for positive and negative samples. I noticed the proposed method uses a top 20-NN approach to retrieve positive samples in the queue, which could contribute significantly to the increased training time per step. What's the queue size used? how much does it contribute to the increased training time per step?\n\n2. Since the paper emphasizes preserving the richness of representations, evaluation on dense prediction tasks such as object detection and instance segmentation (OD/IS) would be valuable, in addition to the provided segmentation probing on ADE20K.\n\n - It would be meaningful to compare the performance of MIM-refiner-pretrained ViT-L on COCO object detection against MAE-pretrained ViT-L following the ViTDet framework [1].\n\n - [1] Li, Y., Mao, H., Girshick, R., & He, K. (2022, October). *Exploring plain vision transformer backbones for object detection.* In European Conference on Computer Vision (pp. 280-296). Cham: Springer Nature Switzerland.\n\n---\n\n### **Recommendation**\n\nConsidering the strengths and weaknesses discussed above, my recommendation for this paper is **ACCEPT**. This is a strong paper with a clear contribution." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "see weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "S1: This paper is well-written and easy to follow.\n\nS2: This paper is not the first to point out that the encoder in MIM methods partially performs image encoding and representation learning. A similar conclusion is also discussed in [A], highlighting that MIM methods using a single ViT structure tend to face this issue. The reviewer previously conducted experiments on MAE-B, showing that introducing an additional decoder can effectively alleviate this problem. This paper demonstrates that, for methods like MAE that use an asymmetric encoder-decoder architecture, especially in larger models, a small decoder cannot fully decouple encoding and decoding, providing academic insights.\n\nS3: This paper proposes a simple and effective MIM-Refiner method, refining the later blocks of MIM models to enhance MIM representations effectively.\n\n[A] Context Autoencoder for Self-Supervised Representation Learning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces MIM-Refiner, which leverages contrastive learning to boost MIM models. 
The proposed method is simple and has demonstrated effectiveness in few-shot image classification tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "W1: Existing work [A] has shown that fine-tuning MIM models can enhance their representation capability (for image classification), but the improvement under full fine-tuning is minimal. Additionally, MAE has demonstrated significant transfer performance on dense prediction tasks [B] (object detection/instance segmentation). Fine-tuning MIM models with contrastive learning methods is unlikely to bring substantial improvement and may even negatively impact performance.\n\nW2: Current vision foundation models, such as DINOv2, exhibit strong patch-level representation learning capabilities and combine MIM and CL. Their learned representations have shown effectiveness in tasks like image classification, pixel classification, and depth estimation. Although this paper discusses the relationship between MIM-Refiner and these models, suggesting that MIM-Refiner can build on them for further improvement, I am concerned that MIM-Refiner may degrade pixel-level representation performance for tasks like semantic segmentation or depth estimation (especially when the backbone is fixed).\n\n[A] Layer Grafted Pre-training: Bridging Contrastive Learning And Masked Image Modeling For Label Efficient Representations.\n\n[B] Exploring Plain Vision Transformer Backbones for Object Detection." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce MIM-Refiner, a refinement process of MIM(masked image modeling)-models using an ensemble of instance discrimination heads attached at intermediate layers to leverage the best pre-trained MIM representations." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024mimrefiner,\ntitle={{MIM}-Refiner: A Contrastive Learning Boost from Intermediate Pre-Trained Masked Image Modeling Representations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0PxLpVURTl},\nnote={under review}\n}" }, "abstract": { "value": "We introduce MIM (Masked Image Modeling)-Refiner, a contrastive learning boost for pre-trained MIM models. MIM-Refiner is motivated by the insight that strong representations within MIM models generally reside in intermediate layers. Accordingly, MIM-Refiner leverages multiple instance discrimination (ID) heads that are connected to different intermediate layers. In each head, a nearest neighbor ID objective constructs clusters that capture semantic information which improves performance on downstream tasks, including off-the-shelf and fine-tuning settings.\n\nThe refinement process is short and simple - yet highly effective. Within a few epochs, we refine the features of MIM models from subpar to state-of-the-art, off-the-shelf features. Refining a ViT-H, pre-trained with data2vec 2.0 on ImageNet-1K, sets a new state-of-the-art in linear probing (84.7\\%) and low-shot classification among models that are pre-trained on ImageNet-1K. MIM-Refiner efficiently combines the advantages of MIM and ID objectives, enabling scaling ID objectives to billion parameter models using relatively little compute. MIM-Refiner compares favorably against previous state-of-the-art SSL models on various benchmarks such as low-shot classification, long-tailed classification and semantic segmentation." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "self-supervised learning", "masked image modeling", "instance discrimination", "computer vision", "contrastive learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/0509fb34d907aea59cc703a3733218e57979ef33.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/420c481f526b73c07e05378095a6a350e93fa7d8.zip" }, "title": { "value": "MIM-Refiner: A Contrastive Learning Boost from Intermediate Pre-Trained Masked Image Modeling Representations" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0QJPszYxpo
Extended Flow Matching : a Method of Conditional Generation with Generalized Continuity Equation
main
Active
Flow Matching;Generative Model
generative models
3;3;5;5;6
5;4;3;4;3
3;1;2;3;2
2;1;2;2;3
2;1;2;3;2
4.4
3.8
2.2
2
2
-0.801784
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. Can the authors consider providing definitions before introducing a new notation in the text?\n2. What is the effect of defining $\\pi$ using plans built using batched samples? Would the vector/matrix field learned change as a function of the batch size? \n3. What kernels do the author use for the RKHS used to construct paths?\n4. In lines 212-214 and lines 220-222, can the authors clarify the output of $u$?\n5. the discussion about the weak assumption of measurability and continuity of $p(x|c)$ with respect to $c$ requires clarification, particularly since piece-wise continuous functions are measurable as well." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The authors identify an interesting problem: observing the conditioning vector in a number of domains can be hard or expensive. The proposal of integrating along paths between different marginals is also interesting, a similar proposal is studied in [Albergo et al 2023]." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose extended flow matching (EFM) for conditional sampling and style transfer using flow matching. EFM consists of \n\n1. 
learning a field which also uses the conditioning vector $c$ as input, which the authors call a matrix field. \n2. The authors then integrate the learned field $u(x, t, c)$ along different paths $\\gamma: [0, t] \\rightarrow [0, 1] \\times C$, where $C$ is the set of conditioning vectors. \n 1. For instance, for conditional generation the authors propose integrating along the path $\\gamma(t) = (t, c)$, which reduces to conditional flow matching. \n 2. For style transfer, the authors integrate along the path $\\gamma(t) = (1, (1-t) c_1 + t c_2)$. Since integrating along $\\gamma(t)$ can be out of domain for models learned trained just on pairs $x, c \\sim p(x, c)$, the authors propose a learning algorithm such that the field $u_\\theta$ also observes such paths during training. \n\nThe authors propose learning such a field $u$ using optimal transport:\n\n1. the authors propose learning an optimal plan similar to [Lipman et al 2023]\n2. instead of using linear interpolation between different points on a path, the authors extend the set of paths to include functions belonging to an RKHS." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While the motivation of EFM was to provide ensure that the learned network $u(x, t, c)$ is smooth with respect to the conditioning vector $c$, the authors do not address how imposing smoothness can allow extrapolation to conditioning vectors not seen during training. \n2. Could the authors explain why the multi-marginal optimal transport approach allows for extrapolating to conditioning vectors not seen during training?\n3. The authors should also consider including other works that learn multi-marginal flow models? For instance, [Albergo et al 2023] propose learning multi-marginal flows and present a learning algorithm for optimizing the paths such that the transport cost in $W_2$ metric is minimized. \n4. 
[Albergo et al 2023] also propose a much more general algorithm for including paths between samples from an arbitrary number of marginal distributions, available during training. \n5. The experiments section can be improved by adding extra text explaining the results and the figures, particularly in figure 4.\n\n\n[Albergo et al 2023] Albergo, M.S., Boffi, N.M., Lindsey, M. and Vanden-Eijnden, E., 2023. Multimarginal generative modeling with stochastic interpolants. arXiv preprint arXiv:2310.03695." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Major:\n\n1.\tCould the authors explain or give an intuition about the regression in MMOT (Eq. 3.4)?\n\n2.\tCould the authors show the extrapolation ability of their methods in a more realistic application of EFM, e.g. style transfer of images?\n\nMinor:\n\n1.\tAt the end of Line 311, “focus on the” is misspelled as “focus ton he”.\n\n2.\t“ConvHull” should be explained." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The theory of integrating property conditions and time in flow matching is highly innovative, and the authors developed MMOT to perform optimal transport within this space." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "Flow matching can generate different data distribution given different desired property conditions. The authors proposed the extended flow matching (EFM) which introduces a continuous mapping from a joint continuous space of time and property conditions to corresponding data distribution, which enables smooth shifts between different conditions. The authors also extended optimal transport to Multi-Marginal Optimal Transport (MMOT) for multiple property conditions. They validated their method on a 2D toy model and conditional molecular generation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The experimental evidence is insufficient." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "When is MMOT-EFM and EFM in general expected to work better than COT-FM / Bayesian-FM? I know there is a short explanation on the differences in assumptions but it is difficult for me to translate what is gained when making a piecewise continuous assumption on p(x|c) vs. a measurability assumption. It’s not clear to me how this compares to these prior works in general.\n\nSmall comments that don’t affect the score: \nThere appears to be an unfinished section D.5 in the appendix. \nGG-EFM isn’t defined in the main text. 
\nI didn’t understand the distinction between p_c and p_{0,c} line 170. \nTypo on line 311 “ton he”\nShr\\”odinger to Schr\\”dinger line 425\nThe source points in Figure 4 b and c (and corresponding appendix figs) are essentially invisible (grey against a grey background). It would be **really nice** to fix this. \n\n\n### Overall\nI think this work presents an interesting idea with promise to understand how these models generalize to unseen conditions. However, this is not explored theoretically. In addition the current method does not scale to practical settings at the moment. I think further investigation as to when the assumptions behind this method make sense relative to other methods would greatly strengthen this work. A better understanding of how this relates to prior literature and when this method is preferable would likely change my opinion of this work." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* Understanding how to extend current generative models to more general conditionals (especially unobserved conditionals) is an important problem particularly in the sciences. \n* I enjoyed the symmetry of the presentation of first standard flow matching and OT-CFM settings followed by EFM and MMOT-EFM settings. Table 1 is great to understand the difference to OT-CFM. \n* To the best of my knowledge the theory is correct and answers some of my questions on how one might generalize flow matching to condition-augmented spaces." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes an extension to flow matching to conditional generation on unknown (but related) conditions using a flow on both the data space and the condition space. 
A variant of this based on multi-marginal optimal transport is proposed as an extension to optimal transport conditional flow matching. 2D and conditional molecular generation experiments are performed showing conditional generation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* It would be great to make clearer to the reader how this method extends to unseen conditions. I think lines 402-405 kind of get at this, but I would have loved to see more emphasis on this point. It is very easy to design a conditional generative model that technically extends to unseen conditions, but it is much more difficult to enforce that that model extends in a reasonable way. EFM has the potential to guide that extension and I would love to see that point explored further.\n* The algorithm is not yet useful in real applications. While the authors also acknowledge this, it’s still a large limitation of the impact of this work. The molecule experiment is extremely limited in terms of comparisons to existing work and overall training setup.\n* Much of the theoretical statements are direct extensions from prior work." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Could you please justify the ZINC-250k experimental design?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The Extended Flow Matching sounds novel and the authors show the newly introduced conditional components in Fig.1, which is quite intuitive.\n\n2. I like the well-structured theoretical discussion from FM to EFM, this can help domain experts grasp the main contribution and difference between the existing OT-CFM and the proposed MMOT-EFM" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "To achieve extrapolation beyond observed conditions, the authors proposed Extended Flow Matching (EFM) framework that is developed upon the Conditional generative modeling. Specifically, the authors introduced a novel algorithm called MMOT-EFM derived from the Multi-Marginal Optimal Transport (MMOT). In the experiments, the authors showed improved MAE over compared FM-based methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. I feel concerned about the experimental design. For instance, the authors introduce a rather usual setting (Appendix 1300-1306). Though it aligns well with the synthetic point cloud experiments, it is quite different from the common practice [1]. \n\n[1] Ketata M A, Gao N, Sommer J, et al. Lift Your Molecules: Molecular Graph Generation in Latent Euclidean Space[J]. arXiv preprint arXiv:2406.10513, 2024.\n\n2. I think critical experiments against highly related OT-CFM methods are missing in this version. \n\nAlexander Tong, Nikolay Malkin, Guillaume Huguet, Yanlei Zhang, Jarrid Rector-Brooks, Kilian\nFatras, Guy Wolf, and Yoshua Bengio. Improving and generalizing flow-based generative models\nwith minibatch optimal transport. arXiv preprint 2302.00482, 2023b." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "see weaknesses" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "I find the motivation very clear: Sometimes we already know the posteriors for several conditions (for instance in molecular dynamics, where we obtain some posterior samples via MCMC), and want to \"smartly\" interpolate between the conditions, i.e., learn a generative model which walks along \"generalized \"geodesics in the space of probability measures. I also like that the authors were very rigorous in their theorems and motivation for the developed algorithm." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces extended flow matching a new flow matching based method, that is designed for conditional generation. For this, the authors make use of the generalized continuity equation by Lavenant. The authors show that their proposed loss indeed has the correct gradients, i.e., regresses onto the true velocity field of the generalized continuity equation. 
The algorithm consists \"learning\" an interpolation via kernel regression (which is needed since \"straight paths\" are not the only viable solution anymore), and then regressing onto a flow matching loss where the is now matrix-valued. This is a generalization of the usual inverse problems framework of flow matching. Further, the authors showcase the effiacy of their algorithm via a toy example and conditional molecular generation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "However, the glaring weakness is that there is not clear cut numerical use case shown. I would like to see a not toyish example where we actually need several conditions and the transport between them. Usually, in the classical inverse problems works there is an implicit geodesic path taken where $y_t = t y + (1-t)y$, since one does not need to alter the condition if posterior sampling is the ultimate goal. If one wants to do style transfer (which seems to be the second motivation of this paper), then one can simply use a conditional FM network which receives the two conditions (source and target) as inputs. Therefore, while theoretically neat I am not convinced of why the generalized continuity equation and a network which moves efficiently also in the condition space, is advantageous. The authors can convince me by providing a clear example where either i) the classical conditional algorithms are not applicable or ii) this approach significantly outperforms the other flow matching models. \n\nI also have some smaller concerns. \n\n1) The scaling in $N_c$ and condition dimension seems to be bad. can you provide the run times for the molecular example also for the baselines? it only says in the appendix that they were completed within 4 hours, but I expect the baselines to train much quicker. Also latent space of a VAE is pretty low dimensional. 
Please provide training your conditional flow matching model on MNIST (no VAEs..), where the condition space is not discrete (i.e., for instance inpainting). Even if this does not fit your motivation, I would like to see the results in such a more standard example and this would improve my confidence in the scalability. \n\n2) Appendix D5 and F are empty (or almost empty). \n\n3) you do not seem to provide any code. I find the algorithm description to be not perfectly clear, there I would very strongly suggest that you at least publish code for the toy example. \n\n4) I believe that the example 7.1 is meaningless. You construct a random example with sparse conditions. Then you show, that your algorithm performs better on the OOD. But basically you can construct an inverse problem which aligns with your in distribution posteriors and does anything else on the OOD data. Of course I am aware that your point is that your algorithm is minimizing the Dirichlet energy and you measure the distribution induced by this. However, it is not clear to me if this is the theoretically optimal thing to do (wrt to Wasserstein). I am guessing that your algorithm computes something like Wasserstein barycenters weighted by some distance to the known conditions? Please clarify why the minimization of the generalized Dirichlet energy should yield theoretically sound posteriors. \n\n5) The manuscript is sloppy at times when discussing related work. \"The authors in (Wildberger et al., 2023; Atanackovic et al., 2024) developed FM-based models to estimate the posterior distribution when the prior distribution p(c) of conditions is known. In contrast, our approach tackles situations where the conditions can only be sparsely observed, and the prior distribution is unknown.\"\n\nThe prior distribution p(c) is not known in (Wildberger et al, 2023). They are only able to sample from the joint distributions (c,x), but this does not mean that you can evaluate it. 
Further, their algorithm can very easily be adapted to the setting you described. If one has posterior samples for sparse conditions $c_i$ one can simply do the joint training over $(x_{i,j}, c_i)$.\n\n6) when style transfer is one of the main modes of motivation, I would also like to see an example of it. \n\nOverall, I appreciate the idea and think that it has merits, but the execution prevents me from accepting it in the current form. I would love to see a practical example, where the main motivation of your algorithm becomes clear. Furthermore, providing a more standard inverse problem on MNIST (with no encoder/decoder) and a continuous condition space would show me that your algorithm at least somewhat scales. If these problems are discussed/solved, then I am willing to raise my score." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a novel framework for continuous conditional generative models that extends Flow Matching with a generalized continuity equation." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024extended,\ntitle={Extended Flow Matching : a Method of Conditional Generation with Generalized Continuity Equation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0QJPszYxpo},\nnote={under review}\n}" }, "abstract": { "value": "Conditional generative modeling (CGM), which approximates the conditional probability distribution of data given a condition, holds significant promise for generating new data across diverse representations.\nWhile CGM is crucial for generating images, video, and text, its application to scientific computing, such as molecular generation and physical simulations, is also highly anticipated.\nA key challenge in applying CGM to scientific fields is the sparseness of available data conditions, which requires extrapolation beyond observed conditions.\nThis paper proposes the Extended Flow Matching (EFM) framework to address this challenge.\nEFM achieves smooth transitions in distributions when departing from observed conditions, avoiding the unfavorable changes seen in existing flow matching (FM) methods.\nBy introducing a flow with respect to the conditional axis, EFM ensures that the conditional distribution changes gradually with the condition.\nSpecifically, we apply an extended Monge--Kantorovich theory to conditional generative models, creating a framework for learning matrix fields in a generalized continuity equation instead of vector fields.\nFurthermore, by combining the concept of Dirichlet energy on Wasserstein spaces with Multi-Marginal Optimal Transport (MMOT), we derive an algorithm called MMOT-EFM.\nThis algorithm controls the rate of change of the generated conditional distribution.\nOur proposed method outperforms existing methods in molecular generation tasks where conditions are sparsely observed." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Flow Matching", "Generative Model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/12b10f8ed8d135b45bc192fc1df4afd0972e8a69.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Extended Flow Matching : a Method of Conditional Generation with Generalized Continuity Equation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0QZcoGdmtJ
Auditing $f$-Differential Privacy in One Run
main
Active
Differential privacy;Auditing privacy
alignment, fairness, safety, privacy, and societal considerations
3;6;8
5;3;3
3;3;3
3;3;3
1;2;2
5.666667
3.666667
3
3
1.666667
-0.917663
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Questions:\n* You claim that your approach achieves tighter results as the number of canaries increases, outperforming the empirical privacy results from Steinke et al. (2023), suggesting that the results can be tight as we increase the number of canaries. Could you elaborate on why your bounds continue to improve with more canaries while the bounds in previous work degrade? What underlying mechanisms in your algorithm contribute to this improvement? Citing the authors: ” Figure 1 demonstrates that our approach outperforms the empirical privacy results from Steinke et al. Interestingly, while the bound in Steinke et al. (2023) degrades as the number of canaries increases, our bounds continue to improve.”\n* What potential sources contribute to any lack of tightness in your lower bounds? Are there specific aspects of the f-DP framework or your implementation that introduce looseness? How might these be addressed in future work to enhance the tightness of the bounds?\n* How does your algorithm perform in the black-box setting compared to the white-box setting? Can you provide detailed experimental results illustrating this performance?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* **Advancement of f-DP Tools**: The paper contributes to the understanding and practical application of f-DP, which could be of independent interest.\n* **Interesting Problem**: Auditing DP mechanisms in a one-run scenario is interesting for practical implementations (particularly in the black-box scenario, see the weakness section), and the paper makes significant progress in this area.\n* **Experimental Validation**: The experimental results are compelling and demonstrate the effectiveness of the proposed approach.\n* **Versatility in Adversarial Models**: Extending the auditing algorithm to handle different adversaries, such as reconstruction attacks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a novel algorithm for auditing differential privacy (DP) mechanisms in a single run, building upon and extending the work of Steinke et al. (2023). By leveraging the theoretical framework of f-Differential Privacy (f-DP), the authors provide tighter lower bounds on privacy leakage, thereby enhancing the existing toolbox for f-DP analysis. Notably, their auditing algorithm can be applied to various adversaries beyond the standard membership inference, such as reconstruction attacks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors investigate exciting problems and provide interesting results. I encourage the authors to continue working on these results, as they are sound and exciting to the DP community. However, I don’t think the work is ready to be published in its current form, as it is somewhat rushed. 
I sketch my main concerns below.\n* **Writing and Presentation Quality**: The manuscript contains several errors and unclear explanations. The authors should revise it before publication, as there are plenty of writing errors and bad citing style.\n* **Unreferenced Figures and Results**: Some results, particularly those in Figure 7, need to be adequately referenced or explained within the text, leading to confusion about their significance.\n* **Incomplete Explanation of Gaps**: The paper needs to explain the gaps between theoretical and lower bounds. Possible reasons for these gaps should be analysed, such as limitations of the f-DP framework, assumptions made in the analysis, or practical considerations in implementation.\n* **Insufficient Experimental Details**: There are no experiments in the black-box setting for which we are compelled to use one-shot auditing. The white-box setting enjoys a tight and efficient auditing algorithm (Nasr et al., 2023), while the black-box algorithms are rather expensive." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- On the section “Empirical Privacy” line no 307, why do the trade off curves need to be ordered? 
If you have a set of trade off curves $f_i$ that pass couldn’t you build a new trade off curve $f(x) = \\min_i f_i(x)$ \n- In what sense are the empirical results tight in Fig 7 and why is that not also evident in Fig 1?\n- Can you explain why abstentions are important in this algorithm?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Valuable Contribution to Existing Research: There has been extensive work on auditing differential privacy guarantees. This paper distinguishes itself by offering a solution that enhances both computational efficiency and the precision of empirical privacy guarantees. The reliance on multiple runs of the mechanism has been a major obstacle to the widespread application of auditing methods. Their approach, requiring only a single run, makes auditing significantly more practical, especially for complex machine-learning algorithms involving extensive model training.\n- Using the $f$-DP framework is a particularly strong aspect of this work. $f$-DP offers a more general and accurate representation of a mechanism's privacy compared to traditional approximate differential privacy. This choice allows for a more fine-grained and robust analysis of privacy. The authors convincingly demonstrate that auditing $f$-DP leads to tighter empirical privacy assessments. By performing the analysis in a single training run, the paper achieves a more comprehensive understanding of the privacy implications within a practical computational framework." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a novel algorithm designed to audit $f$-DP guarantees within a single execution of a mechanism.\nThis area of research has become increasingly significant within the privacy community, particularly due to the limitations of existing auditing mechanisms.\nExisting empirical auditing methods are either computationally expensive (requiring multiple runs of the machine learning algorithm) or fall short in providing a tight empirical privacy guarantee.\nThe need to run the mechanism multiple times has hindered practical applications.\nSteinke et al. (2023) introduced a pioneering approach that balances the number of runs with the tightness of the audit.\nThis present work enhances this trade-off further by auditing $f$-DP guarantees, which provide a more precise representation of a mechanism's privacy compared to traditional approximate DP parameters." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The main weakness of this paper is its presentation. The write-up seems very rushed which at times hinders the flow of the reader. Many references are broken e.g. reference to Algorithm B. Lines 300-312 contain many typos and incomplete sentences. These are issues that can be addressed quickly but in the current state I would argue that the presentations limits the value of this work to the community.\n- The authors have not provided a code artifact. While the contributions of this work are mostly theoretical, the implementation of the algorithm requires care and it would help reproducibility if a code artifact were supplied." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1) What do you mean by \"gubernatorial analysis\"? (Line 95)\n2) Do you have an intuition why the bound in Steinke et al. (2023) degrades with higher numbers of canaries while your bounds continue to improve?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well-motivated and, for the most part, clearly written. It provides a notable improvement over prior privacy auditing techniques." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a computationally efficient privacy auditing procedure by leveraging the f-DP curve, and shows that the resulting lower bounds are tighter than those of previous work." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper contains some ambiguities and cosmetic errors that should be addressed to improve clarity and overall presentation.\n1) clarify that by \"one run\" you mean a training run (rather than an inference run)\n2) explicitly state the limitation of Steinke et al. 
(2023) that you are addressing (in Line 80-82)\n3) change the references to algorithm 3.1 to algorithm 3 (given that that is what the algorithm is called)\n4) remove double mentions of Steinke et al. by just using the reference instead (e.g., in Line 420)\n5) fix the reading flow in Definition 6 (second bullet point is not a full sentence)\n6) correct typos (e.g., Line 194/195, 307, 466, 505/506) and wrong capitalizations in the middle of sentences (e.g., Line 100)" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We use trade-off functions to perform tighter auditing of algorithms designed to satisfy differential privacy in a single run." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024auditing,\ntitle={Auditing \\$f\\$-Differential Privacy in One Run},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0QZcoGdmtJ},\nnote={under review}\n}" }, "abstract": { "value": "Privacy-preserving machine learning requires carefully designed and rigorously analyzed algorithms. However, such designs and analyses are often susceptible to errors or imperfections, leading to mechanisms that may not offer the expected level of privacy due to mathematical inaccuracies or implementation flaws. Conversely, some mechanisms might provide stronger privacy guarantees than can be proven through a loose analysis. Empirical privacy auditing has emerged as a means to address this gap. Existing auditing mechanisms, however, are either inefficient—requiring multiple runs of machine learning algorithms—or suboptimal in calculating the empirical privacy of these algorithms. In this work, we present a tight and efficient auditing procedure and analysis that can effectively assess the privacy of mechanisms. 
Our approach requires only a single run of the mechanism and achieves tight empirical privacy by leveraging the $f$-DP curve, which provides a more accurate measure of privacy than the traditional $\\epsilon,\\delta$ parameters. Experiments demonstrate that our auditing algorithm delivers tighter empirical privacy guarantees." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Differential privacy", "Auditing privacy" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/803c0af3ab3dbae41b9b19d6db963d36fe59243e.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Auditing $f$-Differential Privacy in One Run" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0QePvFoqY6
IncEventGS: Pose-Free Gaussian Splatting from a Single Event Camera
main
Active
3D Gaussian;Event Camera
applications to computer vision, audio, language, and other modalities
3;5;5;5
5;4;4;4
3;4;2;2
3;3;3;2
2;2;2;3
4.5
4.25
2.75
2.75
2.25
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weaknesses. I have assigned a preliminary score based on the initial manuscript, but I may adjust this score based on the authors' responses and feedback from other reviewers." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The topic of event-based 3D reconstruction without camera pose is very interesting topic.\n\n2. The authors conducted extensive experiments demonstrating that IncEventGS outperforms previous NeRF-based methods and other baselines, even without ground-truth camera poses.\n\n3. The writing is clear and easy to understand." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes IncEventGS, an incremental dense 3D reconstruction method using a single event camera. To incrementally recover the 3D scene, IncEventGS leverages the tracking and mapping approach of traditional SLAM. The tracker first estimates initial camera motion from prior 3DGS reconstructions, while the mapper refines both the 3D scene and camera motion using the tracker’s motion trajectory estimates. The advantage of IncEventGS does not require any ground truth camera poses. 
The results show that IncEventGS outperforms prior NeRF-based methods and related baselines, even without ground-truth camera poses. Additionally, it surpasses SOTA event-based VO methods in camera motion estimation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While I acknowledge that this paper is the first to explore 3D reconstruction using a single event camera combined with 3D ground segmentation without camera poses, its novelty appears to be limited. There are existing works using traditional RGB cameras for 3D reconstruction without relying on camera poses, and the approach of directly accumulating events into event images does not clearly highlight significant contributions to the field, whether from the image-based 3D ground segmentation community or the event-based community. I encourage the authors to articulate the specific technical contributions of this work.\n\n2. I recommend that the authors include more examples of extreme scenarios, such as high-speed motion and low-light conditions, alongside comparisons with RGB images. This could better demonstrate the advantages of using event cameras for 3D reconstruction.\n\n3. Regarding the possibility of achieving colored 3D reconstruction, can this method be applied? Since there are existing color event cameras, could the authors obtain data from such cameras to create an example of colored reconstruction?\n\n4. The writing could be further improved in several ways: a) The title in line 97 should be bolded and capitalized. b) Section 3.2 does not require an extensive explanation of event camera principles and image accumulation. c) The font sizes in Tables 1 and 2 should be made consistent." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "There are no concerns regarding ethics" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1) Although this type of work is new in the area of event cameras, why is there no mention of other pose-free camera work in the field of frame-based cameras?\n\n2) Since the document only expresses high-level ideas, are there any plans to make the code publicly available in the future?\n\n3) Why are there no supplementary videos supporting the results shown in the manuscript?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1) An original concept in which 3D-GS and camera poses are optimized simultaneously.\n\n2) Results that surpass the current state-of-the-art." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "1) This manuscript presents a method in which a 3D scene is reconstructed using a single event camera and 3D-GS. The authors describe a process where the 3D scene reconstruction does not require provided camera poses. The 3D-GS parameters and camera poses are simultaneously calculated, using a concept similar to SLAM, but generating a dense 3D scene.\n\n2) The presented method produces results that outperform the current state-of-the-art by a significant margin." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The manuscript is clearly written but does not explain in a precise and in-depth manner how it is carried out. In other words, the concepts expressed are only shown at a high level, without delving into small key details, such as the “continuous time trajectory parameterization” or how “the camera poses (T_k) and (T_{k+\\Delta t}) can be interpolated,” and how exactly to “render two grayscale images (i.e., (\\hat{I}k) and (\\hat{I}{k+\\Delta t})) from the previously recovered 3D-GS,” which makes it very difficult to reproduce the results.\n\nAlthough the manuscript mentions some studies related to 3D-GS and event cameras, it does not mention 3D-GS works that perform 3D reconstruction or novel view synthesis with pose-free cameras and frame-based cameras." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Why did IncEventGS stop using the ground-truth pose after adopting Gaussian Splatting representations compared to Nerf-based representations?\n\n2. As we know, 3DGS hardware friendliness is superior to Nerf-based representations, and I'm curious about the overall runtime of the system compared to Nerf-based.\n\n3. More experiments need to be done, such as Tanks and Temples." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. The results of IncEventGS shown in Tabs 1 and 2 are amazing and effective.\n\n2. This paper is written in an easy-to-understand manner." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes IncEventGS, which is an incremental 3D Gaussian Splatting reconstruction algorithm with a single event camera. IncEventGS employed the tracking and mapping paradigm of conventional SLAM pipelines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The motivation of this paper is weak, as the paper claims, \"Due to its unique asynchronous and irregular data capturing process, limited work has been proposed to apply neural representation or 3D Gaussian splatting for an event camera\". I think the authors should discover the reasons behind it rather than the superficial phenomenon.\n\n2. The title is \"Pose-free.\" Why the author did this is not explained. I think that although no pose ground truth is provided, using conventional slam pipelines actually provides this variable implicitly. Conventional slam pipelines will be more robust than pose estimators, which use deep learning methods.\n\n3. This paper mentions several times “due to its time continuous, sparse, asynchronous and irregular data capturing characteristics.” I don't think the authors have solved this problem; they are still taking the approach of stacking events into the frame.\n\n4. In line 62, citation duplication.\n\n5. The contribution needs to be rewritten, which is just like changing the representation from Nerf to GS. However, this work has already been done.\n\n6. 
\"The main insight of IncEventGS is to accumulate incoming event data into chunks and treat each chunk as a special \"image\". This is not a contribution and does not need to be emphasized.\n\n7. In line 216 and 307, C in (3) and equation 6." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The re-initialization using the pre-trained depth model for regularization with the proposed Gaussian model is not clear. Can the authors provide more details about it? Especially regarding the visualization of the intermediate process.\n\n2. For SLAM or VIO, the accuracy of the trajectory is crucial. However, for NVS (Novel View Synthesis) tasks, the proposed method merely reconstructing a gray map of the scene can diminish the significance of the task to some extent. It is not enough to work only on the gray map. Could we perform the NVS task on the RGB event dataset? For example, the dataset from [1] or [2] or the event-based color synthetic Replica dataset.\n\n3. What's more, I noticed that the authors did not provide any supplementary materials. 
Could the authors provide some visual demos to better observe the overall effect of this method?\n\n[1] Robust e-NeRF: NeRF from Sparse & Noisy Events under Non-Uniform Motion\n\n[2] EventNeRF: Neural Radiance Fields from a Single Color Event Camera" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Overall, this paper presents an incremental 3D Gaussian Splatting reconstruction algorithm from a single event camera, without requiring the ground truth camera poses. It has a motivation and is adequate for the audience and also solid on the technical side and adds the event-based VO tricks and off-the-shelf depth model for re-initialization to 3DGS. Thus, this work is interesting for readers working at the intersection of novel view synthesis and neuromorphic vision." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The present paper proposes a novel view synthesis approach that reconstructs 3D scenes from event camera streams without precise camera poses. The main goal is to handle the unique asynchronous and irregular data characteristics of event cameras, which pose significant challenges for traditional 3D reconstruction methods. By utilizing a tracking and mapping approach inspired by SLAM pipelines, the method estimates camera motion based on prior reconstructed 3D-GS scenes and incrementally refines both the camera motion and scene representation. It's capable of handling real-world scenarios with no gt. poses, offering improved performance compared to NeRF-based methods and event-based visual odometry. It efficiently renders high-quality brightness images, outperforming baseline methods in terms of novel view synthesis and motion estimation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The setup of the real-world experiments lacks validity. Specifically, for the evaluation on the TUM-VIE dataset, the qualitative results alone are insufficient. The authors should also include quantitative analysis using no-reference image quality assessment metrics to provide a more comprehensive evaluation.\n\n2. Although the proposed method can operate using an event stream as input to reconstruct 3D Gaussians, it still relies on uniform event stream as input. The proposed method is, therefore, limited by the density of event data streams, which restricts its practical applications. \n\n3. Despite the detailed comparison of the quality of rendered images, the efficiency of the training and rendering process is not included, which is an important metric of NVS methods. Extra comparisons with other methods on training time and inference FPS would help better evaluate the proposed method.\n\n4. This method is valuable for addressing event-based visual odometry. However, the authors focus more on the NVS task, and using Gaussian functions to reconstruct grayscale scenes seems less relevant, as they are mainly suited for head-mounted devices, which reduces the method’s rationale.\n\nBeyond this I have mainly minor comments and nitpicks:\n\nl.117, the sentence contains a grammatical error and should be revised. Specifically, \"IncEventGS conduct...\" should be corrected to \"IncEventGS conducts...\".\n\nl.142, the expression should be standardized by changing \"se3\" to \"se(3)\" for clarity and consistency.\n\nl.162~186, I think the re-initialization process is vital to the method, but the main figure of the pipeline does not reflect this which may generate some confusion with readers not familiar with the method." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "In this work, we present IncEventGS, a high-quality 3D Gaussian using a single event camera, without requiring ground truth camera poses." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024inceventgs,\ntitle={IncEvent{GS}: Pose-Free Gaussian Splatting from a Single Event Camera},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0QePvFoqY6},\nnote={under review}\n}" }, "abstract": { "value": "Implicit neural representation and explicit 3D Gaussian Splatting (3D-GS) for novel\nview synthesis have achieved remarkable progress with frame-based camera (e.g.\nRGB and RGB-D cameras) recently. Compared to frame-based camera, a novel\ntype of bio-inspired visual sensor, i.e. event camera, has demonstrated advantages\nin high temporal resolution, high dynamic range, low power consumption and\nlow latency. Due to its unique asynchronous and irregular data capturing process,\nlimited work has been proposed to apply neural representation or 3D Gaussian\nsplatting for an event camera. In this work, we present IncEventGS, an incremental\n3D Gaussian Splatting reconstruction algorithm with a single event camera. To\nrecover the 3D scene representation incrementally, we exploit the tracking and\nmapping paradigm of conventional SLAM pipelines for IncEventGS. Given the\nincoming event stream, the tracker firstly estimates an initial camera motion based\non prior reconstructed 3D-GS scene representation. The mapper then jointly refines\nboth the 3D scene representation and camera motion based on the previously\nestimated motion trajectory from the tracker. The experimental results demonstrate\nthat IncEventGS delivers superior performance compared to prior NeRF-based\nmethods and other related baselines, even we do not have the ground-truth camera poses.\nFurthermore, our method can also deliver better performance compared to state-of-\nthe-art event visual odometry methods in terms of camera motion estimation." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "3D Gaussian", "Event Camera" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/0dfaa05d59ea80432e014a66936799eb890105e2.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "IncEventGS: Pose-Free Gaussian Splatting from a Single Event Camera" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0QkVAxJ5iZ
FacLens: Transferable Probe for Foreseeing Non-Factuality in Large Language Models
main
Active
Large language models;hidden question representation;non-factuality predictor;transferability
alignment, fairness, safety, privacy, and societal considerations
3;5;5;8
4;4;3;4
2;3;2;4
2;2;3;3
3;3;3;3
5.25
3.75
2.75
2.5
3
0.080845
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses for clarification. Also, the paper has multiple typos that can be addressed." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. FacLens can be adapted to different LMs by leveraging unsupervised domain adaptation techniques, which reduces the resource-intensive need to generate new labeled data for each model. \n2. The authors present a shift from traditional non-factuality detection (NFD) to non-factuality prediction (NFP). They show that models are internally aware of whether they can accurately answer a question before generation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a model method (FacLens) to predict the likelihood of language models (LMs) generating non-factual responses before generation occurs, a task called non-factuality prediction (NFP). This work claims that, unlike traditional non-factuality detection (NFD) methods that probe response representations, FacLens probes the question's hidden representations to make non-factuality predictions. 
FacLens can be adapted to different LMs by leveraging unsupervised domain adaptation techniques, which reduces the resource-intensive need to generate new labeled data for each model. The authors conduct experiments across four models and three datasets to demonstrate FacLens's superior performance and efficiency compared to baseline methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The authors claim that different LLMs share similar cognitive patterns in terms of knowledge awareness, as they rely on transformer-based architectures. However, not all LMs use the same architecture; for instance, recent MoE architectures, which have gained significant popularity, replace feed-forward networks with MoE modules. It is essential to study MoE-based models to examine if this claim holds. Additionally, the proof of this hypothesis is unclear and not convincing and needs further support.\n2. Apart from the domain adaptation techniques, FacLens’s development heavily relies on previous work and lacks substantial novelty.\n3. The overall performance gain compared to baselines, particularly SAPLMA, is marginal, and so there is no compelling evidence that probing question hidden representations leads to better non-factuality prediction.\n4. In the main experiments (Table 1), NFD baselines are excluded, and only a selected set of methods categorized under NFP are reported.\n5. The experiments do not represent a practical LM generation setting, as they are limited to a set of short-form QA datasets. While the authors define the NFP task, they compare it with naive baselines, such as entity popularity, and do not consider more sophisticated methods developed for factuality improvement using model internals.\n6. 
Some findings, such as LLMs generally recognizing “whether they know” in their middle layers, have been previously reported and are not new findings.\n\nOverall, this paper lacks significant contributions, and the limited experimental setup and marginal performance gains make it challenging to claim that the proposed method is more effective than its existing counterparts." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. While the authors criticized the output statement in cases when LLMs cannot provide factual answers in the end of Sec. 2, I could not understand the criticization because the statement is not necessarily a non-factual answer. I hope the authors will clarify the point." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper tackled an interesting task of non-factuality prediction (NFP), tried to solve the problems of previous work in efficiency and transferability, and proposed a lightweight NFP model, named Factuality Lens (FacLens). The experiments highlighted FacLens’s superiority in both effectiveness and efficiency." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "Unlike studies on post-hoc detection of non-factual responses, this paper studied non-factuality prediction (NFP), that aims to predict whether an LLM will generate a non-factual response to a question before the generation process. While previous efforts on NFP have demonstrated LLMs' awareness of their internal knowledge, they still faced challenges in efficiency and transferability. Thus, this paper proposed a lightweight NFP model, named Factuality Lens (FacLens), which effectively probes hidden representations of questions for the NFP task. Further, this paper discovered that the hidden question representations from different LLMs exhibit similar NFP patterns, which enables the transferability of FacLens across LLMs to reduce development costs. The experiments highlighted FacLens’s superiority in both effectiveness and efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While the observations that the hidden question representations from different LLMs exhibited similar NFP patterns in Sec. 4.2 is interesting, we are eager to know why they happened and why unsupervised domain adaptation is possible in cross-LLM FacLens. It is better to investigate and mention some possible reasons for them, if possible, while the inspiration from the research on human cognition was mentioned in the paper.\n\nFurther, I wonder the observations in Sec. 4.2 can be really applicable to other LLMs. Can the authors mention the generalizability of the observations? \n\nMore seriously, in Sec. 5, depending on the datasets, the characteristics and the performance of LLMs seem different in Fig. 6. For example, on PQ and EQ, Qwen2 is rather different from the others, that leads to a concern that the assumption is not really correct and the transferability cannot be really valid among the LLMs. \n\n2. 
I have a concern about the method for NFP dataset construction. Following previous work, the authors adopted a method for QA datasets with short answers. However, all current QA datasets are not generally in the category. It is better to show how large the method can cover the current QA datasets and/or to describe how they can cope with QA datasets with longer or more complex answers.\n\n3. When the authors construct training data on just an LLM, the selection of the LLM might be important and might affect the performance. So it is better to show which LLM the authors selected as the best for constructing the training data and how they decided it.\n\n4. In human evaluations in Sec. 5.3, it is better to have comparisons with some baselines." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Nothing major, see weaknesses." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "* This paper solves a series of important problems, notably the transferability in the NFP domain. This kind of domain adaptation is an important area of NLP research and should be encouraged across the community.\n* The experiments are very well thought, extremely detailed, and the paper is overall pretty decently written." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work introduces FactLens, a NFP model which, unlike previous NFP methods, exhibits transferability across different LLMs. The authors make the following major contributions through the introduction of FactLens:\n1. Show clear evidence of the importance of hidden layers in the pre-hoc/NFP setting.\n2. Introduce a novel architecture for Factlens, such that the Factlens model weights can be adapted to a new LLM for good performance on the NFP task. \n3. Conduct experiments to show superior performance in comparison to both post-hoc models, as well as similar NFP models. Factlens is also considerably faster than other similar models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Certain questions that seem to remain open:\n* The size of LLMs used for training seems small. While this isn’t a major concern, it would be good to understand how FactLens does with larger models (say size > 50B)\n* It’s not clear whether Domain Adaptation is used for the results in Table 1. If no, how does the domain-adapted Factlens do in comparison to other NFP baseline?. In general, the authors should clarify which of the results in the paper use DA for FactLens. This can be added in the experimental setup." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Refer to the weakness." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The method is clear and straightforward. FactLens is a simple probing method to assess question factuality, and its streamlined structure makes it easy to adopt.\n2. Excellent efficiency and transferability. The experiments demonstrate that FactLens can be effectively transferred to other models, performing well across various benchmarks, including PopQA, Entity Questions, and Natural Questions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces FactLens, a probing method designed to predict whether a large language model (LLM) is likely to provide factual responses to a given question. Additionally, the authors demonstrate that FactLens can be effectively transferred across different models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The primary weakness is that FactLens does not show a clear performance improvement over previous methods. Both Figure 3 and Table 1 indicate that FactLens performs comparably to, but not significantly better than, prior approaches.\n2. The experiment lacks a wider range of benchmarks. Adding more datasets, such as TriviaQA [1] and HotpotQA [2], could provide a more comprehensive evaluation.\n\n[1] TriviaQA: A large-scale distantly supervised challenge dataset for reading comprehension. Joshi et al., 2017. \n[2] HotpotQA: A dataset for diverse, explainable multi-hop question answering. Yang et al., 2018." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024faclens,\ntitle={FacLens: Transferable Probe for Foreseeing Non-Factuality in Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0QkVAxJ5iZ},\nnote={under review}\n}" }, "abstract": { "value": "Despite advancements in large language models (LLMs), non-factual responses remain prevalent. Unlike extensive studies on post-hoc detection of such responses, this work studies non-factuality prediction (NFP), aiming to predict whether an LLM will generate a non-factual response to a question before the generation process. Previous efforts on NFP have demonstrated LLMs' awareness of their internal knowledge, but they still face challenges in efficiency and transferability. In this work, we propose a lightweight NFP model named Factuality Lens (FacLens), which effectively probes hidden representations of questions for the NFP task. Besides, we discover that hidden question representations sourced from different LLMs exhibit similar NFP patterns, which enables the transferability of FacLens across LLMs to reduce development costs. Extensive experiments highlight FacLens’s superiority in both effectiveness and efficiency." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large language models", "hidden question representation", "non-factuality predictor", "transferability" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/e2297ed06ca065d361ec3f28961b352c3377db10.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "FacLens: Transferable Probe for Foreseeing Non-Factuality in Large Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0QnKnt411O
Unsupervised Zero-Shot Reinforcement Learning via Dual-Value Forward-Backward Representation
main
Active
unsupervised reinforcement learning;zero-shot generalization;skill discovery;successor representation
reinforcement learning
5;5;8
5;4;3
3;2;3
3;2;3
2;3;4
6
4
2.666667
2.666667
3
-0.866025
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weaknesses part." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This work is well motivated and the promotion of exploratory behaviour during the pre-training phase to increase the data coverage is reasonable.\n\nThe paper is well written and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work presented a pre-training framework for zero-shot reinforcement learning by leveraging forward-backward (FB) representation. Unlike some previous study on zero-shot RL, this work analysed the performance gap of FB in the online learning setting compared with a specific exploration strategy. The authors then proposed a new exploration reward based on contrastive learning and incorporated this into FB traning by end-to-end online learning. The proposed method is evaluated in zero-shot online URL and fine tuning settings. Experimental results suggest that it achieved improved performance than some baseline methods given limited interactions." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The major contribution in this work is the combination of an exploration reward with FB learning, where the technical novelty is limited.\n\nAlthough the performance gain shown in Table 1 looks strong, I have concern on the baselines used in comparison for this setting. It is unclear why these are suitable baselines here for the problem of zero-shot online URL. Many baselines here are either not designed for online learning with self-generated trajectory (for example, LRA-SR used in (Touati et al., 2023)) or not zero-shot testing (if I’m not mistaken some baseline finetunes longer steps, for example, CeSD with 100k interactions rather than 1e^4 used in this work). So it does not look so exciting when explicit exploration techniques are used in combination with a zero-shot technique. A naive approach for this problem would be using a method of pure exploration (e.g. r_{ce} proposed in this work as the proposed intrinsic reward itself has the capability to collect an offline dataset.) or a method of skill discovery to collect an offline dataset with better data coverage than FB, then training FB on top of this dataset and testing its ability in zero-shot generalisation. This could probably better demonstrate the advantage of combining exploration reward with FB in online URL.\n\nFollowing the previous comment, for Table 1, it would be better to group the baselines into several categories so that it is clear from the table which property (zero-shot, online, offline, exploration or skill discovery) each method has or does not have.\n\nThere is no theoretical analysis to support the proposed objective function and reward function either in the FB pre-training stage or fine-tuning stage. It is unclear what guarantee of zero-shot generalisation of the proposed method can have.\n\nQuestions and suggestions:\n\nFor Fig. 2 and Fig. 
3, Please add more descriptions to the caption so that the reader can understand the main discovery and meaning of the figure." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Reflecting the weaknesses discussed above, my key questions are:\n - How broadly applicable is the method, particularly beyond robotic control tasks? Are there any preliminary results in other domains that the authors could include?\n - How important is their particular choice of reward to encourage exploration -- the contrastive entropy reward? How well would other rewards stand-in for this, or is it particularly well suited?\n - Similar questions for the reward mapping technique. Could we see more justification for their approach and other alternatives explored?\n - Can the authors provide code so that others can directly reproduce the results?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Zero-shot generalisation in online settings is an important problem in RL, where progress is going to be essential for successfully deploying RL in real-world applications. DVFB advances the field's understanding of how to create agents that can solve and adapt to new tasks immediately, without requiring extensive retraining. 
\n - The authors build on foundational concepts in the field such as SR and FB representations. The paper does a good job identifying the limitations of FB in online settings, pinpointing insufficient exploration as a core issue, and using their insights to justify the extensions of FB into DVFB. The introduction of a novel exploration value function alongside the skill value function is an original approach that enhances exploration and, as shown in their motivation and results, improves generalisation. Furthermore, the addition of a reward mapping is a valuable addition that enables them to demonstrate both zero-shot generalisation and fine-tuning in an online setting.\n - Impressive results: the paper presents rigorous experiments across 12 diverse control tasks in a widely used benchmark for tasks requiring fine-grained motor control. In terms of zero-shot performance, their method outperforms baseline methods across the 12 tasks, particularly in tasks where others struggle with exploration (further supporting their motivation). It also outperforms on fine-tuning performance, showing faster adaptation and greater stability compared to the state-of-the art in URL.\n - The paper is well written, guiding the reader through the problem being addressed, relevant related work, the motivations for their extensions, implementation, results and conclusions. The methodology section is nicely laid out, with clear explanations and schematics detailing components of the model. Their experimental results are clearly presented and easy to understand." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work introduces the Dual-Value Forward-Backward (DVFB) representation framework for unsupervised reinforcement learning (URL). 
It tackles the challenge of enabling agents to generalise to new tasks without further training (zero-shot generalisation) in addition to fine-tuning adaptation in online settings.\n\nIt builds on successor representation (SR)-based approaches which aim to learn a representation of expected future states and have been shown to aid zero-shot generalisation in RL. In particular, the work extends forward-backward (FB) representations by learning both forward and backward dynamics. The authors explore failures in FB-based approaches in online URL settings and find that it is due to inadequate exploration. They address this by introducing an intrinsic reward based on contrastive learning to encourage exploration, combining this “exploration value” function to the usual “skill value” function to arrive at their DVFB. The authors also introduce a fine-tuning scheme using a reward mapping technique to add further online adaptation capabilities to their method. \n\nThe authors validate DVFB across twelve robot control tasks in the DeepMind Control Suite and demonstrate the approach gives both superior zero-shot and fine-tuning performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Potential for broader applicability: the paper focuses on tasks in the DeepMind Control Suite. This demonstrates DVFB’s capability in robotic control tasks, but leaves one wondering about the framework's versatility which otherwise seems very general. Could the authors discuss the potential for adapting DVFB to other domains, such as navigation? If possible, preliminary results or discussion on expected performance in different contexts would broaden the scope of the work.\n - Other intrinsic rewards: the paper attributes improvements to enhanced exploration, but it doesn’t delve into specific advantages that contrastive entropy is providing over other intrinsic rewards. 
Going beyond the DVFB w/o CE ablation experiment and trying out other intrinsic rewards (beyond just RND rewards) could add further insight into their particular choice of contrastive entropy.\n - Sparse presentation of reward mapping technique: there’s limited detail and justification for the reward mapping technique. It’s unclear whether this particular mapping method is optimal or if other strategies might perform equally well or even better in different tasks. Further exploration would clarify its effectiveness and limitations. Could the authors discuss more justification for this approach, as well as analysing some alternatives? \n - Reproducibility: lack of code to reproduce the results: providing code would significantly enhance the paper’s accessibility. While the inclusion of pseudocode and hyperparameters is appreciated and provides important details for the method, the absence of actual code makes it challenging for others to fully replicate the experiments or apply the DVFB framework to other settings." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Could you furnish more detailed explanations regarding the metrics employed in the studies, especially for figures where the axes and comparisons lack clarity? Supplementary labeling and contextual information would assist readers in appropriately interpreting your findings.\n\n2. 
What is the performance of DVFB in relation to other contemporary zero-shot generalization methods, and what are the reasons for the selection or exclusion of specific baselines? Incorporating a broader array of comparisons or elaborating on these selections would bolster the assertion of enhanced performance.\n\n3. Could you provide a detailed explanation of the practical execution of the reward mapping technique, possibly including pseudocode? Additional detail would elucidate this component's impact during fine-tuning.\n\n4. In what manner does the contrastive entropy reward facilitate skill differentiation, and can you present empirical data that substantiates its efficacy? An elucidation or ablation of the role of this reward would improve comprehension.\n\n5. Have you performed any analysis to assess the sensitivity of DVFB to essential hyperparameters? This would be beneficial to evaluate the resilience of the framework across diverse contexts and circumstances.\n\n6. Could you elaborate on the possible limits of DVFB, including computational complexity and scalability in practical applications? Considering these criteria would yield a more equitable perspective on the approach's practical viability." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The research presents a novel Dual-Value Forward-Backward (DVFB) paradigm that integrates skill and exploratory value functions to improve data variety and zero-shot generalization in online URL, providing an innovative method for reward-free learning.\n\n2. Should the suggested DVFB approach demonstrate efficacy, it may rectify a basic constraint in reinforcement learning by facilitating zero-shot generalization absent task-specific incentives, hence potentially enabling RL agents to adapt more readily to varied real-world contexts." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The study presents the Dual-Value Forward-Backward (DVFB) framework for zero-shot generalization in online unsupervised reinforcement learning (URL). DVFB integrates a skill value function with an exploration value function to enhance data diversity and generalization in the absence of task-specific rewards. It utilizes a contrastive entropy intrinsic reward to improve exploration and a dual-value fine-tuning method to optimize downstream task performance, claiming good results in continuous control tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The experimental configuration and illustrations are challenging to interpret, with scant explanation offered for particular measures and comparisons. Enhanced labeling, elucidation of axes and benchmarks, and uniform layout throughout figures would facilitate comprehension of the data and augment the paper's readability. Figure 6 has mixed x-axis labels, which needs an improvement. Legends can be bigger w/o affecting the size of total figure for example Figure 7. \n\n2. The method depends on several essential network hyperparameters given in Table-3 yet the research fails to analyze the sensitivity of the results to these selections. An investigation of network hyperparameter sensitivity would enhance confidence in the robustness and generalizability of the findings.\n\n3.The implementation and/or utilization of the reward mapping technique for fine-tuning can be clarified. Integrating pseudocode would improve the accessibility and reproducibility of this component.\n\n4. The report omits a discussion of potential limitations, including computing cost, scalability, and difficulty in real-world implementation. Recognizing these factors might yield a more equitable viewpoint and inform subsequent research." 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "The proposed dual-value forward-backward representation framework is the first method to simultaneously achieve superior zero-shot generalization and fine-tuning task adaptation capabilities in online URL." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024unsupervised,\ntitle={Unsupervised Zero-Shot Reinforcement Learning via Dual-Value Forward-Backward Representation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0QnKnt411O},\nnote={under review}\n}" }, "abstract": { "value": "Online unsupervised reinforcement learning (URL) can discover diverse skills via reward-free pre-training and exhibits impressive downstream task adaptation abilities through further fine-tuning.\nHowever, online URL methods face challenges in achieving zero-shot generalization, i.e., directly applying pre-trained policies to downstream tasks without additional planning or learning.\nIn this paper, we propose a novel Dual-Value Forward-Backward representation (DVFB) framework with a contrastive entropy intrinsic reward to achieve both zero-shot generalization and fine-tuning adaptation in online URL.\nOn the one hand, we demonstrate that poor exploration in forward-backward representations can lead to limited data diversity in online URL, impairing successor measures, and ultimately constraining generalization ability.\nTo address this issue, the DVFB framework learns successor measures through a skill value function while promoting data diversity through an exploration value function, thus enabling zero-shot generalization.\nOn the other hand, and somewhat surprisingly, by employing a straightforward dual-value fine-tuning scheme combined with a reward mapping technique, the pre-trained policy further enhances its performance through fine-tuning on downstream tasks, building on its zero-shot 
performance.\nThrough extensive multi-task generalization experiments, DVFB demonstrates both superior zero-shot generalization (outperforming on all 12 tasks) and fine-tuning adaptation (leading on 10 out of 12 tasks) abilities, surpassing state-of-the-art URL methods." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "unsupervised reinforcement learning", "zero-shot generalization", "skill discovery", "successor representation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/345f7cb21f8483f5b6dfa4ec198edf3994b839d6.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Unsupervised Zero-Shot Reinforcement Learning via Dual-Value Forward-Backward Representation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0QvLISYIKM
Pointwise Information Measures as Confidence Estimators in Deep Neural Networks: A Comparative Study
main
Active
information theory;confidence estimation;deep neural networks
interpretability and explainable AI
3;5;6;6;6
4;3;4;3;3
1;2;3;3;3
1;3;3;2;3
2;3;4;3;2
5.2
3.4
2.4
2.4
2.8
-0.490098
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* Can you please address the concerns about whether the improved performance of PVI is due to the PVI measure itself or the temperature scaling in the PVI estimator?\n\n* What is the reason for the contradictory findings where PSI is shown to be a better confidence estimator than PVI in experiments on correlation to Margin, while PVI outperforms PSI in experiments on misclassification detection, selective prediction, and calibration analysis?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The paper provides a comprehensive theoretical and empirical analysis of three pointwise information measures (PMI, PVI, and PSI) on their effectiveness to be used as confidence estimation tools. It demonstrates that these measures can be applied in a post-hoc manner and do not require modifying the model architecture or retraining the network.\n\n* Empirical evaluation covers broader scope including misclassification detection, selective prediction and calibration error analysis. These aspects are crucial for thoroughly analyzing the reliability of the confidence measures in supporting model predictions." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the use of information-theoretic measures for confidence estimation in deep neural networks in a post-hoc manner. It specifically compares three measures from the prior works: pointwise mutual information (PMI), pointwise $\\mathcal{V}$-information (PVI), and pointwise sliced mutual information (PSI), on their effectiveness as tools for confidence estimation. The study examines the theoretical properties of these measures in terms of invariance, correlation with margin, and convergence rate. Empirical evaluations are conducted on tasks such as misclassification detection, selective prediction, and calibration error analysis, using image classification tasks. These evaluations compare the three measures against baseline methods including softmax, margin, max logit, and negative entropy. The results indicate that PVI outperforms both PMI and PSI in terms of effectiveness as a confidence estimation tool." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Despite the use of pointwise information measures requiring the training of additional models in a post-hoc manner, both PMI and PSI perform worse than the baseline softmax measure, as evidenced by the results in Table 3 and Table 4. Additionally, the PVI estimator employs temperature scaling, a post-hoc confidence calibration method, which raises concerns about whether the improved performance of PVI is due to the PVI measure itself or the temperature scaling. The paper would benefit from further evaluation of additional benchmark methods to provide clarity on this issue, specifically: (1) PVI estimator without the temperature scaling, and (2) Softmax (SM) with temperature scaling [Guo et. al. 
2017]\n\n* Given the focus and motivation of the paper on exploring post-hoc confidence estimation tools, the empirical evaluation does not include comparisons with established post-hoc confidence calibration methods. To address this gap, it would be beneficial to compare the proposed measures with well-known methods such as Temperature Scaling (TS) [Guo et. al. 2017] and Ensemble Temperature Scaling (ETS) [Zhang et. al. 2020].\n\n\n[Guo et. al. 2017] Guo, Chuan, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. \"On calibration of modern neural networks.\" In International conference on machine learning, pp. 1321-1330. PMLR, 2017.\n\n[Zhang et. al. 2020] Zhang, Jize, Bhavya Kailkhura, and T. Yong-Jin Han. \"Mix-n-match: Ensemble and compositional methods for uncertainty calibration in deep learning.\" In International conference on machine learning, pp. 11117-11128. PMLR, 2020." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. In Table 2, why do $n$ for PMI and PSI differ in value? I fail to see the results to be compatible with each other if the $n$ values are not comparable.\n2. I might not get 100% on the correlation between the properties of each pointwise information measure outlined in Section 3 with the results in Section 4. 
For example, how would you correlate the invariance property, in which PMI theoretically has an edge, with the result obtained in Section 4 for failure prediction in Section 4.1 and confidence calibration in Section 4.2?\n3. For someone who is not that familiar with the following line of work, with regards to the Convergence Rate part detailed in Section 3.3, are there any possible ways to model $\\mathcal{V}$ for PVI in a way such that its estimation error are comparable to $|\\mathrm{pmi}(x;y) - \\hat{\\mathrm{pmi}}_n|$ as in PMI case?\n4. Typos:\n- In point 1 of \\textbf{Contributions} , there should be a period between estimation and We (\"estimation. We\" instead of \"estimation We\". [Section 1]\n- \"from in\" -> \"from\" [Section 4]" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well-written and highly enjoyable to read.\n2. The explanation regarding experiments in this paper goes in-depth, and I highly appreciate the authors for making Jupyter Notebook and source code available.\n3. While this paper does not propose any new methods, it provides novel insights on utilizing various pointwise information measures to estimate the confidence of DNNs." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The following paper conducts theoretical and experimental analysis on different pointwise information measures that serve as a metric denoting the confidence of neural network predictions. It considers three pointwise information measures: (1) pointwise mutual information (PMI), (2) pointwise $\\mathcal{V}$-information (PVI), and (3) pointwise sliced mutual information (PSI). 
Initially, the paper introduces the formal definition of each information measure and its pointwise version, followed by analyses of each pointwise measure on (1) invariance properties, (2) geometric properties, and (3) convergence properties. There are also experiments on failure prediction and confidence calibration tasks to measure each pointwise information measure in terms of confidence estimation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Experiments available in Section 4 of the paper are done on relatively small datasets and architectures. Is it possible to scale up the dataset (TinyImageNet, ImageNet) and architecture (ViT, DeIT), just like the experiments conducted by Jaeger et al., 2023 (https://arxiv.org/abs/2211.15259)? I am asking because benchmark methods used for comparison in Section 4 evaluate their method on a relatively larger scale in terms of data and architecture within their original paper.\n2. More on the Questions section." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- In L500, why convergence rate is a crucial factor for confidence calibration?\n- Why PVI is favorable for complicated dataset than other PI measures?" 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- It provides theoretical analysis regarding the properties of several PI measures, which are supported by empirical observations.\n- Effectiveness of the measures are validated via two types of experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Uncertainty estimation becomes essential for ensuring safety AI deployment. To estimate this, various measures, commonly based on softmax probs, are employed, but they are often poorly calibrated. The authors handle this issue by utilizing pointwise information measures - PMI, PSI, PVI. They analyze several properties of measures to validate its reliability, i.e., invariance and sensitivity to margin, and conduct empirical evaluations to support its effectiveness on several datasets. Experimental results provide some findings regarding the superiority and scalability among the measures." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- More comprehensive analysis with other uncertainty metrics are needed, such as MC Dropout, MCMC, or Laplace approximation. 
It also needs to compare with non-pointwise information measures, such as MINE [1].\n- Empirical results are based only on small-scale datasets, such as MNIST or CIFAR-10, although this paper aims to address scalability.\n- To better understanding, it would be helpful to add some visualizations, such as saliency map (with more curated examples as well as Fig.5), ROC curve, or ECE diagram.\n\n[1] Belghazi et al., Mutual Information Neural Estimation, ICML 2018" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. What do the authors mean by a “post-hoc manner” in L-45? Is this post-hoc recalibration technique with additional hold-out calibration data to fine-tune some learnable parameters?\n2. Remark 1 is vague to me. Firstly, what kinds of uncertainty are you talking about (aleatoric or epistemic)? It would be great if the authors could explain this kind of uncertainty through the lens of IT (see Eq.1 in [3]). Secondly, why when a classifier is uncertain on X, the uncertainty about $g(X)$ should ideally be the same? Can you formally explain this argument and give some examples about this?\n3. In proof of Prop.6, while [4] provides estimation error bounds on the sample marginal distribution $P(X)$, why the authors can trivially apply their results on the conditional $P(X|Y)$? \n4. Could the authors please compare methods with the sharpness score [5] in Section 4.2? 
I think this is important because lower ECE is only a necessary condition, it is not a sufficient condition to evaluate a good uncertainty estimation with DNN.\n5. L-172 mentioned that PVI uses temperature scaling, is this the main reason PVI achieves the lowest ECE in Tab.4?\n6. Can PI measures extend to other kinds of datasets such as text, audio, video, etc.? Is there any challenge with this extension?\n\nReferences:\n\n[1] Goldfeld et al., Sliced mutual information: A scalable measure of statistical dependence, NeurIPS, 2021.\n\n[2] Xu et al., A theory of usable information under computational constraints, ICLR, 2020.\n\n[3] Mukhoti et al., Deep deterministic uncertainty: A new simple baseline, CVPR, 2023.\n\n[4] Jiang et al., Uniform Convergence Rates for Kernel Density Estimation, ICML, 2017.\n\n[5] Kuleshov et al., Calibrated and sharp uncertainties in deep learning via density estimation, ICML, 2022." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This paper is very well-written and is clear to understand the important aspects of the algorithm.\n- I like the new direction of using IT tools (e.g., mutual information, conditional entropy, etc.) to improve the reliability of DNN. I believe the benchmark results of three recent PI measures are useful for communities.\n- The theoretical results are solid with clear mathematical notations, clear statements, and proof of the invariance, geometric properties, and convergence rate per each PI measure.\n- The theory is also confirmed by experimental evidence, e.g., Fig.1 with the experiment on correlation to margin results in geometric properties, Table 2 with the convergence rate.\n- The experimental results are extensive with several settings across different modern DNN architectures on standard benchmark datasets." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the impact of three pointwise information (PI) measures on the uncertainty quantification quality with Deep Neural Network (DNN). Through the lens of Information Theory (IT), the authors provide rigorous theoretical properties regarding the invariance, geometric, and convergence rates. Extensive experimental results confirm the theoretical arguments, and, the benchmarking suggests the pointwise V-information (PVI) outperforms the mutual information (PMI) and the sliced mutual information (PSI) in failure prediction and confidence calibration." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The novelty regarding a proposed method is weak since PMI, PSI [1], and PSI [2] have been proposed before.\n- The novelty in theoretical analysis is quite weak. Specifically, the invariance properties have been mentioned in Section 3 of [1] and the convergence rate has been analyzed in Section 3 of [1] and Section 4 of [2].\n- The connection between theoretical results and model uncertainty is unclear to me. Details are in Question 2.\n- Three PI measures are less computationally efficient than other baselines (e.g., standard Softmax) by requiring additional models and computes either $pmi(x;y)$, $psi(x;y)$, or $pvi(x\\rightarrow y)$. \n- The experimental results also lack some measurements such as sharpness and predictive entropy to assess the uncertainty quality performance." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please observe and address my questions and comments in the weakness section, such as on the strength of claims, usefulness of theoretical results for confidence estimation, experimental design, and others." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- Approaching confidence estimation from an information-theoretic perspective provides an interesting angle, and the suggested information measures seem relevant and somewhat practical. \n- The paper clearly outlines multiple factors of motivation for the work, which help put the approach into a broader context.\n- The information measures are closely examined and various theoretical properties are studied. This is also obvious from the substantial appendix which lists many properties of the information measures and possible estimation methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores the use of point-wise information measures as notions of prediction confidence for neural networks. They propose three existing information measures (PMI, PSI, PVI) with associated estimation methods, and state their theoretical properties in terms of invariance to transformations, geometric properties w.r.t. decision boundary, and convergence rates of the estimators to the true measures. The results are motivated as useful or intuitive for uncertainty quantification or confidence estimation. Then two experiments on misclassification detection and selective prediction are performed, where the measures are compared to a few simple alternative notions of model confidence. 
Finally, their calibration property in terms of ECE is examined. The authors suggest that their point-wise information measures provide accurate and robust measures of model confidence." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main concerns are with respect to the claims made by the paper, obtained insights, and the experimental design and connection to confidence estimation. I will list my points of concern under each of these categories, albeit they are connected.\n\nClaims\n- The proposed measures are motivated as a post-hoc approach to confidence estimation. All the estimation methods for PMI/PSI/PVI require custom neural network modeling and training. How is this supposed to be post-hoc? For example, how am I supposed to apply this to an existing, pre-trained neural network that I treat as a black-box and do not want to fine-tune? I do not think this qualifies as post-hoc.\n- The proposed measures are motivated by a “Relationship to Probabilistic Causation” yet this relationship is never mentioned again or examined, and only briefly mentioned in the limitations/future work section. In that section it is then also claimed that \"the PI measures are the optimal choice of explainability\" but there are no proper experiments on model explainability or causality, so this claim is not backed up in any way.\n- The proposed measures are motivated by their “Direct Probability Computation”, but given their value ranges the only way to obtain probabilities is to pass them through a squashing function such as softmax, which is precisely done in the experiments. So how does the interpretation of obtained probabilities, and their associated reliability, differ in any way from just a regular softmax on logits? 
The associated claims on \"robustness\" are not examined or backed up in any way.\n- The proposed measures are motivated by the need for “uncertainty quantification” and prevalent miscalibration of neural networks. Firstly, recent research has shown that modern neural network architectures such as transformers (not considered here) can in fact be quite well calibrated [1,2], and even if this was not the case, their proposals do not address a way to remedy model miscalibration but rather just suggest another confidence measure. Their claim on having “better calibrated” confidence measures is unconvincing to me due to their experimental design (see below), and thus their very strong claim on \"outperforming all existing baselines for post-hoc confidence estimation\" is poorly backed up. Finally, the relationship between proposed measures and meaningful uncertainty interpretations is very speculative and does not reach beyond a few broad and high-level arguments in their remarks (see below). So overall, this angle of motivation is also lacking based on their strong claims.\n\nInsights\n- I have a key question: By the invariance properties in sec 3.1 we have that PMI is best, by the geometric and convergence properties in sec 3.2. and 3.3 we have that PSI is best, but in the performed experiments we find that PVI is best. How can you reconcile this and claim that theory and experiments are in line with each other?\n- To re-iterate on the connection to uncertainty quantification: it is repeatedly stated that there is a high relevance for “model uncertainty”, which equates to a notion of epistemic uncertainty. But then, Remark 1 motivates that uncertainty should be invariant to data transformations, which now relates to notions of data (aleatoric) uncertainty. Yet overall, the quantity of interest is in fact p(y|x) which is simply predictive uncertainty of the model given an input. 
So, it does not seem to me like there is a principled association between the information measures and actual notions of uncertainty, and the authors are not clear about what kind of uncertainties we are trying to address. Overall, the connections to uncertainty are mainly contained in the motivating introduction, and in Remark 1 and Remark 2, and are all very high-level and speculative.\n- Regarding Remark 1: the quantity of interest is p(y|x), whereas data transformations are applied to features X. Since we then have that $g(X) \\neq X$, I don't necessarily see an issue if $p(y| g(X)) \\neq p(y|X)$ because we are conditioning on a different quantity.\n- Regarding Remark 2: The provided interpretation for confidence estimation does not take into account data atypicality or OOD'ness [3]. These samples may lay far away from the decision boundary/margin but also in the tail of the data support, and thus should ideally exhibit low confidence. Also, the interpretation on confidence correlating with margin distance is only desirable for overlapping supports. If e.g. $P(X|Y=0)$ and $P(X|Y=1)$ are clearly separated (as e.g. used in Prop. 4) then we would desired maximum confidence everywhere. So, it is unclear to me how Remark 2 follows from the stated results and how directly applicable/useful these results are.\n- Regarding sec 3.2: The section seems to borrow different tools from different papers to show results for the different information measures. However, the assumptions (which seem very strong), conditions of validity, and form of results are all very different, and no interpretations are given. How do they relate to each other in terms of strength and the individual components influencing them? For example, what is Prop. 4 for PMI useful for and why do we not have a similar result in regards to \"sample-wise margin\" as for PSI and PVI? 
Similar questions also apply to sec 3.3 on convergence results.\n- Regarding L228: based on the wording it is unclear what the “sample-wise margin” is supposed to be. A mathematical definition seems necessary here.\n- Regarding the margin correlation experiment: The results are confusing to me because in Table 1 it seems like the results between different measures are quite different (e.g., PMI has lower and more volatile correlations than PSI), yet the UMAPs virtually all look the same. How should I understand that?\n\nExperiments\n- My main concern is about the fact that their information measures are passed through a softmax function and *calibrated with temperature scaling* before benchmarking against other methods. This seems like a very biased comparison, since we observe in App D.1 that these operations significantly alter the distributions of the information measures, and improve upon the considered performance metrics. It seems unreasonable to me to *scale and calibrate* your measures beforehand, and then claim afterwards that they provide \"direct probabilities\" and \"well-calibrated\" confidence estimates. How is this a fair comparison to any baselines that are not subject to the same transformations, e.g. ML, LM? In that context, do you also apply temperature scaling to any of the other baselines such as softmax (MSP, SM)? I feel like any performance claims should rather be reported for the raw information measures instead, since it otherwise becomes unclear where any benefits stem from.\n- In the experiments on confidence calibration only two simplistic baselines are considered, and they are marginally outperformed. To claim that they are \"outperforming all existing baselines\" seems like a very strong claim in that light. It would be more meaningful to consider other baselines for confidence estimation, including those that have been subjected to a similar approach of re-calibration as they do for their own measures (using temperature scaling), e.g. 
isotonic regression [4], regularization [5], or other uncertainty methods like models with variance predictor [6] etc.\n- Relatedly, the results in Table 3 are often within each other's margin of error, so there are some questions on the reliability or significance of “outperforming”.\n- In L401-403, what is the intuition for only working with features from the last layers? This is not clearly explained.\n- Are there any clear principles guiding the choices of estimation methods for the information measures, and associated transformations (i.e., softmax or temperature scaling)? Based on the appendix it seems like purely based on hold-out performance. If so, why not consider other squashing functions or re-calibration procedures? The choices are not well documented and seem primarily motivated for their simplicity.\n- I am personally missing a more detailed and meaningful interpretation of the results beyond stating what can be seen in the provided results tables.\n\nSummary\n- In conclusion, I find that the paper makes overly strong claims and motivates the work from multiple angles which are then left unexplored or never properly analyzed. The experimental design raises some questions and combined with the marginal improvements in experiments casts doubts on the practicality and usefulness of the approach. I am struggling to see the real novelty of the paper. The proposed information measures and their estimation methods are all taken from existing papers, and many of the theoretical results rely strongly on these papers as well. Is the theoretical analysis novel? Personally, it is hard for me to say since I am unfamiliar with this research domain. Is the novelty then in its application/use for confidence estimation? The experiments are unconvincing, and the connections to uncertainty do not go beyond some high-level arguments. 
In addition, their theoretical insights and empirical results seem somewhat contradictory on what the best information measure is supposed to be. For example, they conclude that \"This superior performance is likely due to PVI being the most well-rounded metric, particularly in terms of its invariance and margin sensitivity”, even though Remarks 1, 2 and 3 on these properties rank PVI lowly. While the paper explores some interesting information-theoretic tools, their use for robust and reliable confidence estimation is substantially lacking in my opinion.\n\nReferences\n\n[1] Minderer, Matthias, et al. \"Revisiting the calibration of modern neural networks.\" Advances in Neural Information Processing Systems 34 (2021): 15682-15694.\n\n[2] Wang, Deng-Bao, Lei Feng, and Min-Ling Zhang. \"Rethinking calibration of deep neural networks: Do not be afraid of overconfidence.\" Advances in Neural Information Processing Systems 34 (2021): 11809-11820.\n\n[3] Yuksekgonul, Mert, et al. \"Beyond confidence: Reliable models should also consider atypicality.\" Advances in Neural Information Processing Systems 36 (2024).\n\n[4] Naeini, Mahdi Pakdaman, and Gregory F. Cooper. \"Binary classifier calibration using an ensemble of near isotonic regression models.\" 2016 IEEE 16th International Conference on Data Mining (ICDM). IEEE, 2016.\n\n[5] Mukhoti, Jishnu, et al. \"Calibrating deep neural networks using focal loss.\" Advances in Neural Information Processing Systems 33 (2020): 15288-15299.\n\n[6] Maddox, Wesley J., et al. \"A simple baseline for bayesian uncertainty in deep learning.\" Advances in neural information processing systems 32 (2019)." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024pointwise,\ntitle={Pointwise Information Measures as Confidence Estimators in Deep Neural Networks: A Comparative Study},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0QvLISYIKM},\nnote={under review}\n}" }, "abstract": { "value": "Estimating the confidence of deep neural network predictions is crucial for ensuring safe deployment in high-stakes applications. The softmax probabilities obtained from the neural networks are commonly interpreted as confidence scores but they are often poorly calibrated. Many existing methods addressing this issue involve modifying the network architecture or training procedure, which may not always be feasible in practice. In this paper, we use tools from information theory to estimate the confidence of deep neural network’s predictions in a post-hoc manner. In particular, we compare three pointwise information (PI) measures: pointwise mutual information (PMI), pointwise $\\mathcal{V}$-information (PVI), and the recently proposed pointwise sliced mutual information (PSI). We show in this paper that these PI measures naturally relate to confidence estimation. We first study the invariance properties of these PI measures with respect to a broad range of transformations. We then study the sensitivity of the PI measures to geometric attributes such as margin and intrinsic dimensionality, as well as their convergence rates. We finally conduct extensive experiments on benchmark computer vision models and datasets and show the effectiveness of these measures as tools for confidence estimation. A notable finding is that PVI is better than PMI and PSI for failure prediction and confidence calibration, outperforming all existing baselines for post-hoc confidence estimation. 
This is consistent with our theoretical findings which suggest that PVI is the most well-rounded among the PI measures." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "information theory", "confidence estimation", "deep neural networks" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/659949f158060beeee3be91edfede5c2dbb79dc7.pdf" }, "presentation": null, "primary_area": { "value": "interpretability and explainable AI" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/8a5636e8019b0a6fa5734e470cb63bea1c64f903.zip" }, "title": { "value": "Pointwise Information Measures as Confidence Estimators in Deep Neural Networks: A Comparative Study" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0R3ha8oNPU
SecCodePLT: A Unified Platform for Evaluating the Security of Code GenAI
main
Active
Code Generation;Cybersecurity;Safety;Large Language Models
datasets and benchmarks
3;3;5;5
3;4;3;3
2;2;2;2
3;2;2;3
2;2;2;3
4
3.25
2
2.5
2.25
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. This article discusses risk assessment of code generation. Some related works on code generation may also be discussed, such as BigCodeBench [1].\n\n[1] Bigcodebench: Benchmarking code generation with diverse function calls and complex instructions. https://arxiv.org/pdf/2406.15877\n\n2. Some details are not explained clearly. In line 140 of the manuscript, the author mentions \"extracting code chunks without proper context frequently leads to false positives\". But it seems that the experiment did not perform an ablation experiment on the context field. As shown in lines 867 and 894, the context field is set to None. So I don't understand the role of context and how the solution SecCodePLT in this paper can benefit from context (how to reduce false positives).\n\n3. In line 251 of the manuscript, the author mentions \"We also introduce rule-based metrics for cases that cannot be evaluated with standard test cases\". I am not sure where the rule mentioned here comes from. Is it based on some public manufacturer's provision? \n\n4. In MITRE ATT\\&CK, the kill chain model may be common. In other words, an attacker often implements different attack stages through a series of attack techniques and tactics. It is unclear whether SecCodePLT considers such multi-stage attack and intrusion, rather than a single attack behavior.\n\n5. 
Some minor errors, such as the missing period after \"security-critical scenarios\" on line 76. For \"security is required.)\" on line 253, the period should probably be after \")\"." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Promising direction. Establishing the benchmark to highlight the security risks associated with Code GenAI is a direction worth studying. \n2. Consider real-world attack behaviors and environment deployments. \n3. Compared with existing baselines from multiple perspectives and the results show the effectiveness of the proposed method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents SecCodePLT, a unified and comprehensive evaluation platform for code GenAIs' risks. Considering insecure code, the author introduces a new methodology for data creation that combines experts with automatic generation. Considering cyberattack helpfulness, the authors set up a real environment and construct samples to prompt a model to generate actual attacks. Experiments show that CyberSecEval could identify the security risks of SOTA models in insecure coding and cyberattack helpfulness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Some related work discussions are missing. \n2. Some details are not explained clearly. \n3. There are some minor errors that need to be polished and proofread." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See above." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper presents a pioneering approach by integrating a database with two distinct security-related tasks. SECCODEPLT serves as a comprehensive platform that unifies the evaluation of GenAIs’ risks associated with code generation. This integration facilitates a holistic approach to assessing different dimensions of security risks. By associating samples with test cases, SECCODEPLT enables dynamic evaluation related to code. This method allows for real-time assessments and adjustments, providing a deeper analysis of the code's behavior in practical scenarios." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper develops SECCODEPLT, a unified and comprehensive evaluation platform for code GenAIs’ risks. It introduces a new methodology for data creation that combines experts with automatic generation for insecure code which ensures the data quality while enabling large-scale generation. It also associates samples with test cases to conduct code-related dynamic evaluation. 
Furthermore, it sets up a real environment and constructs samples to prompt a model to generate actual attacks for the task of cyberattack helpfulness, along with dynamic metrics in our environment." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The programming language used in the paper is limited, with Python being the sole language explored. This is inadequate for a comprehensive and large-scale benchmark. The inclusion of other programming languages like C/C++ and Java, which constitute a significant portion of recent CVEs, is crucial. These languages are more complex in syntax and more broadly applied, offering valuable insights into the capabilities of LLMs.\n2. The paper's description of the data generation process for the IC task is unclear. It mentions the use of two different mutators to generate data, yet it fails to clarify the generation of the corresponding test suites. It is uncertain whether the test suites for these new datasets are generated by LLMs or if they reuse the original suites. If generated by LLMs, how is the quality of these suites assured? If the original test suites are used, can they adapt to new contexts effectively?\n3. The paper lacks a necessary ablation study. The boundary of what is user control and what is provided by benchmark is not well clarified. The rationale behind the design of the prompts and instructions used to trigger evaluations is not well justified. For example, why do the authors use system prompts and user templates shown in the paper? Are they more reliable and efficient? Will the differences in these prompts affect the evaluation of LLM ability? If users want to use their own prompts, is there any way?\n4. The evaluation metric of security relevance is confusing and lacks rationales. It is unclear whether this metric aims to assess specific properties of LLMs or the prompts themselves. 
Because the benchmark is designed to evaluate LLMs, using a metric that assesses the prompts introduces confusion. Furthermore, in the SECURITY-RELEVANCY JUDGE prompt template (D.1), the security policy reminder is included as part of the user input and fed directly to the LLM. This setup may influence the evaluation of security relevance and potentially introduce bias.\n5. The ablation of the security policy reminder is missing, similar to problem 3. The paper does not discuss the reasons for choosing the security policy reminder prompt.\n6. The paper lacks a discussion on the specific defenses employed in the CH task. In realistic settings, a variety of defenses, such as firewalls and intrusion detection systems, are typically deployed. It will be insightful to know how different LLMs perform when various defenses are considered in a simulated environment.\n7. The usefulness and generalization of the CH task is limited. Practical attacks vary significantly and are influenced by diverse factors, but the scenario described in the paper lacks generalizability across different attack types and target systems. This limited setting restricts the ability to conduct an accurate and comprehensive evaluation of LLMs for the CH task. Additionally, the paper does not specify the capabilities of attackers, including the types of tools that can be used to launch attacks with LLMs. Also, the strong assumption that some internal users will click on phishing or other harmful links further reduces the task's practical relevance.\n8. Evaluation metrics in CH task. It will be better to set a specific metric to evaluate the overall ASR for the end-to-end attack. Additionally, the details regarding the evaluation process are not well-explained – whether it is a fully automated process or requires human input at various stages to guide or adjust the evaluation." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In “Each seed contains a task description, example code, and test cases”, do all the source code samples have the task description? What are the methods used in test cases?\n\nIt is not clear how the author performs the code mutator as mentioned in “As specified in Section 3.2, we design our task mutators to keep the original security context and code mutator to preserve the core functionalities.” What types of code mutators are used here?\n\nWhat dynamic methods do the authors use for “After mutation, we also manually check the security relevance of newly generated data and run dynamic tests to ensure the correctness of their code and test cases.”?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Through experiments, SECCODEPLT outperforms CYBERSECEVAL in security relevance and prompt faithfulness, highlighting the quality of this benchmark. \nThe authors then apply SECCODEPLT and CYBERSECEVAL to four SOTA open and closed-source models, showing that SECCODEPLT can better reveal a model’s risk in generating insecure code." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes SECCODEPLT, a unified and comprehensive evaluation platform for code GenAIs’ risks.\n\nFor insecure code, the authors introduce a new methodology for data creation that combines experts with automatic generation. For cyberattack helpfulness, the authors set up a real environment and construct samples to prompt a model to generate actual attacks, along with dynamic metrics." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Many state-of-the-art methods for code generation are not mentioned and experimented in the paper, such as:\n\nJingxuan He, Martin Vechev. Large Language Models for Code: Security Hardening and Adversarial Testing. 2023. In CCS. https://arxiv.org/abs/2302.05319.\n\nErik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, and Caiming Xiong. 2023. CodeGen: An Open Large Language Model for Code with Multi-Turn Program Synthesis. In ICLR. https://arxiv.org/\nabs/2203.13474\n\nDaniel Fried, Armen Aghajanyan, Jessy Lin, Sida Wang, Eric Wallace, Freda Shi, Ruiqi Zhong, Wen-tau Yih, Luke Zettlemoyer, and Mike Lewis. 2023. InCoder: A Generative Model for Code Infilling and Synthesis. In ICLR. https://arxiv.org/\nabs/2204.05999\n\nLoubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Muñoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, et al. 2023. SantaCoder: Don’t Reach for the Stars! CoRR\nabs/2301.03988 (2023). https://arxiv.org/abs/2301.03988\n\nThere are many other benchmarks for evaluations of code generation that are not mentioned and compared. Please refer to the paper https://arxiv.org/html/2406.12655v1 for details." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "Code generation for cyber attacks has dual-use purpose and can be misused by malicious actors.\nI am not sure where the community sits on ethics board approval for this topic." }, "flag_for_ethics_review": { "value": [ "Yes, Privacy, security and safety" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Please provide more details on the security tests, addressing the concerns in the weaknesses section abve - including the breadth and depth with which these tests cover potential vulnerabilities and edge cases. \n* Has any analysis of diversity across the 10 samples for each seed and the 5 test cases per sample been conducted? There might be redundancy. \n* How are the vulnerable and patched code examples used for evaluating the correctness of test cases and/or generated output?\n* Please include a comparison with LLMSecEval.\n\n**Cyber attack scenario:** \n* As outlined in the weaknesses above, please explain the motivation for creating your own simulation range and what gap in existing ranges/benchmarks yours is targeting. \n* Please provide more details on your attack refusal investigation - were other role playing prompt wordings tried that might be more persuasive for Claude? Etc." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper is tackling 2 important and timely problems at the intersection of LLMs and cybersecurity. \n•\tHaving a benchmark that includes both security and functionality unit tests for each code example is a strong contribution to the secure code generation literature. Many SOTA LLM papers in the literature currently test code security and functionality separately (ie. using separate datasets/tasks) due to lack of benchmarks with the capability to simultaneously test both. Strong and comprehensive benchmarks are definitely lacking for this problem. \n* Proposed approach to leverage LLMs to scale the development of secure code benchmark dataset. \n* Using a controlled environment to see if the model can generate commands or code that facilitate attacks -- and tracking refusal rates in research on LLM-driven pentesting and red teaming can provide insight into the effectiveness of their internal safety mechanisms." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper provides a benchmark for evaluating security issues associated with LLM generated code. Specifically covering: \ni) Secure code generation: to assess LLMs ability to generate secure code (focusing on Python). \nii) Cyber attack helpfulness: to evaluate a model’s capability in facilitating end-to-end cyberattacks.\nThey apply 4 LLMs to both benchmarks -- CodeLlama-34B-Instruct, Llama-3.1-70B, Mixtral-8×22B, GPT-4o – and compare their performance.\n\n**Secure code generation benchmark:** \nThe authors manually created 153 seed tasks covering 27 CWEs relevant to python – then used LLM-based mutators to generate variations of the tasks for each of the seeds (for large scale generation). 
They also include both vulnerable and patched code versions, together with functionality and security test cases for each task – resulting in a total of 1345 samples with about 5 test cases per sample. \n* They evaluate their samples on ‘prompt faithfulness’ and ‘security relevance’ – comparing with CyberSecEval and outperforming it on both. \n* They also evaluate the 4 LLMs for achieving the task’s required functionality using the pass @1 metric on the provided unit tests. And they evaluate the code security using carefully constructed security tests, including the boost in security when providing security policy info in the prompt.\n* They also evaluate Cursor on their benchmark. \n\n**Cyber attack benchmark:** \nFor this, they build a simulated environment containing a network that runs an e-commerce application. Their environment is structured similarly to a CTF, where the adversary aims to gain access to the database and steal sensitive user information. The benchmark facilitates 7 MITRE ATTACK categories. \n* They evaluate the 4 LLMs on their refusal rate to comply with generating attacks, and when attacks are generated, the attack success rate is measured." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* While a lot of work has been done for this paper and there are definitely strong contributions, by setting CyberSecEval as the goal post to beat, this paper goes too broad in scope (for a paper of this length) and fails to adequately establish its position among the existing peer reviewed literature for each of these 2 distinct research directions. There is no need for benchmarks to cover both secure code generation and cyber attack capability as they have fundamentally different objectives, setups, and evaluation metrics. In the case of CyberSecEval, combining these tasks made sense because it was aligned with their product’s goals. For SecCodePLT, however, the logical connection is less clear. 
Secure code generation and cyberattacks don’t share the same purpose, infrastructure requirements, or audience, and combining them into the one conference-length paper restricts the depth of each evaluation. \n\n* Overall, there is a lack of discussion/justification for the choice of prompt wording/techniques. \n\n**Secure code generation task:** \ni) Relevant benchmarks, such as LLMSecEval (MSR 2023), have been overlooked. LLMSecEval covers 18 Python-related CWEs, which challenges the authors' claim that existing benchmarks address only 8 Python-related CWEs.\nA more detailed analysis of the scope/coverage of existing peer reviewed benchmarks and where this paper fits in would strengthen this work. \nii)\tCode security testing is challenging. Many SOTA papers try to utilize a combination of SAST tools, LLM vulnerability checkers, and manual checking. The discussion of the code security tests could be more convincing if it provided detailed information on the breadth and depth with which these tests cover potential vulnerabilities and edge cases. Eg. providing a breakdown of security test cases per CWE, showing how each test targets specific security requirements and edge cases, would help demonstrate thoroughness. Or providing a metric similar to code coverage in unit testing would help show that the security tests are exhaustive. Overall I didn’t understand how the vulnerable and patched code examples are used for evaluating the correctness of test cases and/or generated output. \niii)\tPrompt quality assessments could be stronger. The Security-relevancy Judge, based on GPT-4, introduces a potential bias since the same LLM type is used in prompt construction. Using diverse models or a user study evaluation of security relevance would provide more objective evaluations. 
Also – leveraging, for example, LLMSecEval’s metrics for prompt clarity and relevance would enhance prompt quality assessment, ensuring that prompts are as clear and useful as possible for secure code generation. \niv) The \"instruction generation\" task was hard to understand and seems to not involve generating written instructions or explanations – but asking the model to generate the full code based on the provided prompt? \n\n\n**Cyber attack scenario:** \ni)\tThe paper lacks a comparison with existing cyber ranges and security testing frameworks like MITRE's Caldera, Metasploit, Cyber Range by IBM, etc, as well as other benchmarks like VulHub, HackTheBox, OWASP Benchmark, VulnHub, etc. The paper lacks a discussion of where they fall short, and why you needed to create your own simulation range – in general the positioning of SecCodePLT within the existing ecosystem is insufficient. What gap in existing environments are you are targeting? In what way did you customize your environment for LLM Evaluation? One of the gaps in the existing literature on LLM-driven pentesting and red teaming is lateral movement – so a simulated environment and benchmark focused on measuring LLM capability for lateral movement would have been interesting. \n•\t(Small detail- Table 2 claims that the environment facilitates seven of the MITRE attack categories – but then lists ‘weaponisation and infiltration’ - neither of which are MITRE attack categories.) \nii)\tRefusal analysis seemed to not be extensive. Motivation is not provided for the choice of role playing wording in the prompt: “I am a security researcher conducting a red teaming test on a web server”. Were other things tried that might be more persuasive for Claude? \n\n**Typos/misprints in the paper:** \n* Missing a word here on page 2? “our automatic and validate process…”. \n•\tRepeated phrase on page 3: “not even not even vulnerable”. \n•\tPage 4: “MITER” should be MITRE. 
\n•\tPage 6: The Figure 2 caption: ‘worn’ – perhaps should be “worm”? \n•\tPage 6: “active domain (AD) server” --- should this be Active Directory? \n•\tSection 4.2 says Figure 8 and 9 are about CyberSecEval but the figure captions say they are about SecCodePLT. \n•\tMultiple instances of “cursor” - should be “Cursor”. \n•\tPage 9: “Not that we consider cursor…” – should be “Note”." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024seccodeplt,\ntitle={SecCode{PLT}: A Unified Platform for Evaluating the Security of Code Gen{AI}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0R3ha8oNPU},\nnote={under review}\n}" }, "abstract": { "value": "Existing works have established multiple benchmarks to highlight the security risks associated with Code GenAI.\nThese risks are primarily reflected in two areas: a model’s potential to generate insecure code (insecure coding) and its utility in cyberattacks (cyberattack helpfulness).\nWhile these benchmarks have made significant strides, there remain opportunities for further improvement.\nFor instance, many current benchmarks tend to focus more on a model’s ability to provide attack suggestions rather than its capacity to generate executable attacks.\nAdditionally, most benchmarks rely heavily on static evaluation metrics (e.g., LLM judgment), which may not be as precise as dynamic metrics such as passing test cases. \nFurthermore, some large-scale benchmarks, while efficiently generated through automated methods, could benefit from more expert verification to ensure data quality and relevance to security scenarios. 
\nConversely, expert-verified benchmarks, while offering high-quality data, often operate at a smaller scale.\nTo address these gaps, we develop SecCodePLT, a unified and comprehensive evaluation platform for code GenAIs' risks.\nFor insecure code, we introduce a new methodology for data creation that combines experts with automatic generation. \nOur methodology ensures the data quality while enabling large-scale generation. \nWe also associate samples with test cases to conduct code-related dynamic evaluation.\nFor cyberattack helpfulness, we set up a real environment and construct samples to prompt a model to generate actual attacks, along with dynamic metrics in our environment.\nWe conduct extensive experiments and show that SecCodePLT outperforms the state-of-the-art (SOTA) benchmark CyberSecEval in security relevance.\nFurthermore, it better identifies the security risks of SOTA models in insecure coding and cyberattack helpfulness. \nFinally, we apply SecCodePLT to the SOTA code agent, Cursor, and, for the first time, identify non-trivial security risks in this advanced coding agent." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Code Generation", "Cybersecurity", "Safety", "Large Language Models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4b4f8ca63ee63f77c7ca93a2493dbc63bbfcd314.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "SecCodePLT: A Unified Platform for Evaluating the Security of Code GenAI" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0R8JUzjSdq
LEMMA-RCA: A Large Multi-modal Multi-domain Dataset for Root Cause Analysis
main
Active
root cause analysis;multi-modal learning;microservice systems;benchmark data
datasets and benchmarks
3;3;5;5
3;5;4;4
2;2;2;3
3;1;2;2
2;3;3;3
4
4
2.25
2
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Could the authors consider including the dependency graph? Having this graph like in petshop seems like a deal breaker to me.\n\n2. Could the authors benchmark the baselines using the dependency graph instead of the causal graph inferred by the PC?\n\n3. For the CIRCA method as well, could the authors provide results based on the dependency graph?\n\n4. The experiments section needs more and a systematic explanation on why each method performed better or worse." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Lemma-RCA is a large, multi-model and multi-domain dataset that includes data from both IT and OT domains. It has over 100,000 timestamps across several connected pods, with a rich mix of test logs and time-series data. This dataset will be valuable for testing and improving future RCA methods.\n\n- Unlike most other datasets, Lemma-RCA provides exact ground-truth labels, showing both when faults happened and the specific components responsible. \n\n- The paper builds on past studies that highlight using causal structure-based methods for RCA. 
The authors compare Lemma-RCA with causal discovery methods and other recent RCA models.\n\n- **Clarity and Presentation**: The paper is well-organized, with clear visuals and a smooth flow that makes it easy to understand in a single read." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Lemma-RCA, a dataset designed for root cause analysis. Lemma-RCA has distinctive and appreciable characteristics like large-scale, multi-modal nature and spans two domains: IT and OT. It includes test logs and time-series data, capturing KPI metrics across several interconnected pods over more than 100,000 timestamps. Notably, the dataset provides ground-truth annotations for both the exact fault occurrence times and the root cause components. This level of detail makes Lemma-RCA a valuable resource for advancing research in RCA." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Missing Dependency Graph**: A key limitation of Lemma-RCA is the absence of a dependency graph, which prior datasets like PetShop provided as a causal graph. This dependency graph is critical for RCA, as it allows more direct evaluations of causal discovery methods. The paper seems to already hint the partial dependency graph in Figure 1(a). I wonder if the authors could add the full dependency graph along with the datasets.\n\n**Insufficient Explanation of Baseline Approaches:** The paper does not include explanations of the baseline approaches used, even in the appendix. Although prior work is cited, providing brief descriptions of each benchmarked approach, particularly the high-performing REASON method, would enhance the reader’s understanding of the comparative results.\n\n\n**Limited Explanation of Experimental Results**: The experimental results focus primarily on causal discovery approaches, but they lack in-depth analysis of why these methods failed. 
The authors' insights and intuition about why each method achieved the numbers reported in the table could significantly enhance the understanding of the experiment section. For instance, suppose we assume that the dependency graph is the true causal graph as in PetShop. Then can the authors establish how far the PC's predicted causal graph is from the true dependency graph. This would at least give us a sense of the causal discovery performance and put the RCA results in context. For instance, if the causal discovery performance is very poor, there is no meaning in expecting the methds like PC, GOLEM, etc. to perform better in predicting root causes. Additionally, one interesting experiment to run would be evaluating the causal graph based baselines on the true dependency graph, instead of the one inferred from observational data by PC.\n\n**Choice of Baseline Algorithms:** Given that the dataset is timestamped, it cannot be assumed that each record is i.i.d. Some causal discovery methods, like those in the Tigramite package (https://jakobrunge.github.io/tigramite/), are tailored for time-series data. It is unclear why the authors chose standard PC over these alternatives, which may be more suitable for time-dependent causal discovery.\n\n\nFinally, some important prior RCA works appear to be missing among the benchmarked methods. For example, the paper by Pham et al. (2024) on BARO highlights that inaccurate RCA predictions can result when a method fails to learn the correct causal graph. Including such approaches would provide a more thorough baseline comparison and strengthen the evaluation.\n\n[1] Pham L, Ha H, Zhang H. Baro: Robust root cause analysis for microservices via multivariate bayesian online change point detection. Proceedings of the ACM on Software Engineering. 2024 Jul 12;1(FSE):2214-37." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. The authors claimed that the proposed datasets contain real system faults. If I understand correctly, the authors developed two microservice platforms and deployed them in production and collected the real system faults for 8 days when users are using these platforms. It is a bit surprising that so many faults happened in 8 days. Moreover, in the faults description section, it seems that these faults are simulated (e.g., External Storage Failure) to mimic the real world scenario. Could the authors clarify this?\n2. It seems that SWaT and WADI are from existing work. The authors applied some anomaly detection algorithms on them to transform discrete labels into continuous ones. It is not clear why this is necessary. Moreover, SWat and WADI are already evaluated for RCA in the REASON (Wang et al. KDD2023) paper. Since this is a dataset paper, including existing datasets into the proposed one should not be seen as the contribution. \n3. From experiments on existing methods, it seems that two IT ops datasets are not very challenging. For instance, REASON performs quite well in terms of PR@k, MRR and MAP@k on both of them with only the metrics data. What is the difference between the proposed datasets compared with existing ones, e.g., AIOps data in REASON and the popular train ticket datasets? 
When new datasets are proposed, they are expected to be more challenging, where current methods are failed on them. If current method can handle the proposed well with only metric modal, what is the meaning of including log modal?\n4. The authors conducted some preprocessing to convert logs to time series for evaluation. But the open-sourced datasets do contain all the original logs, right?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. New multi-modal datasets are collected for RCA problem.\n2. Eight existing RCA methods are evaluated on the proposed datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors proposed a new dataset with both metrics and log collected for the root cause analysis task. In addition, 8 existing RCA methods are evaluated on this dataset. The proposed datasets could be a good addition for evaluation of RCA methods for later research. However, it is not very clear what the benefit of including log modal data is. Existing methods work quite well on these datasets with only metrics modal." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The description of the data collection is insufficient. See Q1.\n2. Some subsets of the datasets are from existing work and have been evaluated before. They should not be seen as the contribution of this work. See Q2.\n3. The proposed IT ops datasets seems to be less challenging for existing works. See Q3." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "(1)\tThe author should provide more valuable data rather than simply assembling data. Additionally, it is not clarified whether these platforms the data come from are sufficiently representative to ensure the quality of the data and the data collection period appears to be rather short, making it difficult to establish whether the dataset adequately captures a wide range of fault patterns and system behaviors.\n(2)\tThe experiments designed by the authors do not seem sufficient to demonstrate the value of the dataset. I suggest that the authors select several widely recognized RCA methods with known performance differences and analyze whether these methods exhibit similar performance distinctions on this dataset.\n(3)\tThe author can pay more attention to the readability of the figures in the paper and the normalization of the experimental results." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "(1)\tLEMMA-RCA is the first public dataset specifically designed for root cause analysis, covering two domains—IT and OT. 
\n(2)\tThe paper thoroughly tests multiple existing RCA methods on LEMMA-RCA, demonstrating the dataset’s quality and multi-modal value.\n(3)\tBy making LEMMA-RCA freely accessible, the paper lowers research barriers, encouraging collaboration between academia and industry and enhancing the generalizability and practical impact of RCA methodologies." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents LEMMA-RCA, a large-scale, multi-modal, and multi-domain dataset specifically designed for Root Cause Analysis (RCA) in complex systems. The dataset includes real-world fault cases from IT and OT operational systems, covering microservices, water treatment, and distribution systems to support a wide range of RCA tasks. To validate the effectiveness of LEMMA-RCA, the authors evaluated various RCA methods on this dataset, demonstrating its diversity and utility across offline and online settings as well as single and multi-modal data scenarios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1)\tOne contribution of this study is the introduction of LEMMA-RCA, the first multi-domain dataset for RCA. However, the dataset includes only IT and OT domains, which appear to be a simple combination of two unrelated domains, thus raising questions about the solidity of this contribution. Additionally, the limited data collection period, such as the 11 days for the OT system, may not capture long-term trends, potentially limiting its applicability to broader fault analysis scenarios.\n(2)\tThe figures in this study are unclear, heavily relying on screenshots. \n(3)\tThe experimental analysis tables lack consistency in reporting, with varying decimal places and an absence of standard deviation reporting." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. **Clarification:**\n- The current data collection section seems tailored for domain experts and could benefit from clarification for a general audience. For instance, what do \"IT\" and \"OT\" refer to? Are \"Prometheus\" and \"ElasticSearch\" tools or companies? Clarifying the meanings of such terms would improve the accessibility. \n- Figure 2(a) would benefit from a more detailed explanation in its caption.\n- In Figure 3, what is the KPI being referenced? Should it be assumed that all KPIs in this figure relate to system latency? Please specify the y-axis further.\n- How were the root causes $V_a$ for each system fault $a$ labeled? The authors may include a section in the paper detailing their labeling methodology.\n2. **Evaluation Metrics:** The evaluation metrics appear to be sample-independent. Why did the authors not consider sample-dependent metrics? For example, over a 10-day system run yielding 1000 faults, the accuracy of the prediction algorithm could be tested against the actual root cause labels.\n\n3. **Data Quality Claims:** The authors suggest high data quality based on baseline comparisons. 
This conclusion seems somewhat overstated, as the main insight from these experiments appears to be that \"MRR performance improves when considering two modalities jointly.\"\n\n**Comment on Missing Data:** While the authors view missing data as a limitation, I consider it a realistic aspect of real-world data, which poses a meaningful challenge rather than a flaw." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The dataset is open-source, multi-modal, and well-suited to RCA, making it both timely and relevant.\n- The authors have provided a thorough review of existing baseline approaches." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents Lemma-RCA, a novel dataset and benchmark designed for root cause analysis (RCA). This dataset includes four sub-datasets: two from IT environments and two from OT environments, offering a large-scale, multi-modality dataset (with both KPI and log data) that captures real-world system faults. The authors validate the dataset’s quality by testing it with eight baseline models across offline single-/multi-modality and online single-modality settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The dataset description could be more accessible to a broader audience, as suggested in the questions below.\n- Reproducibility is limited due to insufficient implementation details for the baseline models." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024lemmarca,\ntitle={{LEMMA}-{RCA}: A Large Multi-modal Multi-domain Dataset for Root Cause Analysis},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0R8JUzjSdq},\nnote={under review}\n}" }, "abstract": { "value": "Root cause analysis (RCA) is crucial for enhancing the reliability and performance of complex systems. However, progress in this field has been hindered by the lack of large-scale, open-source datasets tailored for RCA. To bridge this gap, we introduce LEMMA-RCA, a large dataset designed for diverse RCA tasks across multiple domains and modalities. LEMMA-RCA features various real-world fault scenarios from IT and OT operation systems, encompassing microservices, water distribution, and water treatment systems, with hundreds of system entities involved. We evaluate the quality of LEMMA-RCA by testing the performance of eight baseline methods on this dataset under various settings, including offline and online modes as well as single and multiple modalities. Our experimental results demonstrate the high quality of LEMMA-RCA. The dataset is publicly available at https://lemma-rca.github.io/." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "root cause analysis", "multi-modal learning", "microservice systems", "benchmark data" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/8b70989f90f3d0060a279cb0ab42f52c4a855b19.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "LEMMA-RCA: A Large Multi-modal Multi-domain Dataset for Root Cause Analysis" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0RHMnPj8no
Improved Sample Complexity for Private Nonsmooth Nonconvex Optimization
main
Active
Differential privacy;nonconvex optimization;nonsmooth optimization;Goldstein stationarity
alignment, fairness, safety, privacy, and societal considerations
3;5;6;8
4;4;3;3
2;3;3;3
2;2;2;3
2;3;3;4
5.5
3.5
2.75
2.25
3
-0.83205
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. I am confused by the explanation of the improvement on the non-private term Remark 3.2. The authors explain that\n> while the optimal zero-order oracle complexity is d/αβ^3 (Kornowski & Shamir, 2024), and in particular must scale\nwith the dimension (Duchi et al., 2015), the sample complexity might not.\nSince the algorithm is one-pass, then the sample complexity would be worse than the oracle complexity?\n2. Is Online-to-Non-Convex conversion optimal? (Related to the weakness above) If not, any algorithms based on it will be suboptimal." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper is well-structured and easy to follow.\n- The results improve over prior state of the art, establishing the first DP NSNC ERM algorithm with sublinear dim-dependent sample complexity. The non-private term is dimension-independent, which improves over the previous dimensional dependent result in Zhang et al. 2024." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents algorithms to improve sample complexity in differentially private (DP) nonsmooth, nonconvex (NSNC) optimization. 
The authors propose two zero-order algorithms that improve the results over Zhang et al. 2024\n1. Single-pass, sqrt(d) improvement over Zhang et al. 2024. The authors also establish a dimension-independent “non-private” term, which is not known before for NSNC DP optimization.\n2. A multi-pass algorithm further improves the sample complexity, yielding the first algorithm to preform private ERM with sublinear dimension-dependent sample complexity for NSNC objectives.\nAdditionally, the authors show that Goldstein-stationarity generalizes from the ERM to the population." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main concern is about contextualizing the contribution:\nLike Zhang et al. 2024, this paper also heavily relies on “Online-to-Non-Convex conversion” (O2NC) of Cutkosky et al. (2023). The authors also mention that a lower bound is unknown, making it hard to assess the contribution beyond incremental improvement.\n\nA discussion of the Tree Mechanism is missing. It would be very hard for readers not familiar with the Tree Mechanism to understand.\nTypo: page 2 and in particular is the first algorithm to [preform] private ERM" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- Why did the authors decide to use Gaussian Mechanism instead of the tree mechanism for ERM?" 
}, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper is a fairly simple extension of previous results. The authors are able to improve the past sample complexity by an order of $O(\\sqrt{d})$ by using a high probability subgaussian bound on the sensitivity of the queries. \n\n- The paper also extends the results to other settings.\n\n- The generalization statement (Proposition 5.1) is a cool result to show the validity of the ERM approach for solving the Goldstein-stationary point.\n\n- The paper is well written overall." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the problem of designing Differentially Private Algorithms for Nonsmooth Nonconvex Optimization. It specifically studies the zeroth order settings. Thanks to more careful analysis, the paper is able to improve on the dimension dependence of the previous results. It also extends the past result to the ERM settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper is basically using the same algorithm proposed by [1]. This is not a huge issue since they are able to make some nice modifications to improve the sample complexity. However, this does limit the potential impact of the paper.\n\n- I also think $m$ is quite large, which would make it really inefficient to run in practice. Currently, m can be something like $O(d^2T^{4/5})$, which is very hard to do in practice.\n\n- It would be interesting if there were some matching upper bounds.\n\n[1] Zhang, Qinzi, Hoang Tran, and Ashok Cutkosky. \"Private zeroth-order nonsmooth nonconvex optimization.\" arXiv preprint arXiv:2406.19579 (2024)." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The tree mechanism in Algorithm 1 is hard to understand and seems logically inconsistent. Specifically, regarding the function NODE, what is its intended purpose? There appears to be no defined output for NODE. Moreover, in line 13, $k'$ is assigned a value greater than $k$, however, line 14 subsequently tests the condition $k'\\le k$, which can never be true. As a result, $S$ remains an empty set and is never updated.\n\n2. In the fourth line of Proposition 2.5, for calculating each $X_i$, should it instead use $\\sum_{j=1}^i M_j$ rather than the expression given in the paper $\\sum_{j=1}^i M_i$?\n\n3. In (Cutkoskyetal.,2023), $\\Delta_{t+1}$ is updated by $\\Delta_{t}+\\eta g$ (as stated in their Remark 10), while in Algorithm 2 line 8, $\\Delta_{t+1}$ is updated by $\\Delta_{t}-\\eta g$. Could you clarify the rationale behind this difference?\n\n4. In Theorem 3.1, while the sample complexity has been reduced by a factor of $\\Omega(\\sqrt{d})$ compared to (Zhang et al., 2024), this comes at the expense of increasing the number of random directions $m$ from $d$ to $d^2$, potentially resulting in a longer runtime." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper studies an important problem in DP non-convex optimization, and achieves improved sample complexity bounds over existing works." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores differentially private (DP) optimization algorithms for stochastic and empirical objectives that are non-smooth and non-convex, presenting methods that achieve Goldstein-stationary points with improved sample complexity bounds compared to prior work. The authors introduce a single-pass ($\\epsilon$,$\\delta$)-DP algorithm capable of producing ($\\alpha$,$\\beta$)-stationary points. Subsequently, they propose a multi-pass, polynomial-time algorithm that further refines sample efficiency by designing an effective ERM algorithm and demonstrating that Goldstein-stationary points can generalize from the empirical to the population loss." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The presentation is at times unclear, leading to a disjointed reading experience.\n\nAdditionally, the paper offers limited technical innovation. Most of the algorithmic framework and techniques appear to be adapted from previous works." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Is the result in the paper tight? In other words, is there a lower bound provided?\n\n2. What is the key challenge in improving the result by at least $\\(\\sqrt{d}\\)$? Specifically, how does this improvement compare to the results in the referenced work?\n\n3. What role does the (α, β)-Goldstein stationary point play in this paper?\n\n4. What is the novelty of this paper compared to previous works?\n\n5. Can you explain more about the result regarding the non-private term and private and how they contribute to the final result?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. **Significant Improvement in Sample Complexity** \n The paper offers a substantial reduction in sample complexity for differentially private (DP) nonsmooth nonconvex (NSNC) optimization. The single-pass algorithm achieves a lower dependence on dimension \\(d\\) compared to prior work, which is highly impactful for high-dimensional problems in machine learning.\n\n2. **Innovative Use of Goldstein-Stationarity** \n By focusing on Goldstein-stationary points, the authors leverage a nuanced stationarity condition suitable for nonsmooth nonconvex optimization, allowing for more practical solutions where traditional gradient-based methods fall short. This approach builds on and expands the utility of Goldstein-stationarity in DP settings.\n\n3. **Generalization from Empirical to Population Loss** \n The paper addresses a theoretical gap by proving that empirical guarantees of Goldstein-stationarity translate to the population loss. 
This generalization strengthens the theoretical foundation and practical relevance of the proposed algorithms, as it ensures that results on empirical data apply to broader distributions.\n\n4. **Applicability to Real-World DP Machine Learning Tasks** \n The proposed algorithms are zero-order (using only function evaluations) and thus avoid the need for gradient information, making them suitable for a wider range of machine learning models that may have nonsmooth, nonconvex loss landscapes. This approach is particularly beneficial in privacy-sensitive applications like federated learning.\n\n5. **Novel Dimension-Independent Term** \n The single-pass algorithm introduces a dimension-independent term in the \"non-private\" component of the sample complexity, challenging previous assumptions in DP optimization for NSNC objectives. This innovation indicates potential for further sample complexity improvements and opens new directions for DP research in nonconvex settings." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses differential privacy (DP) in nonsmooth, nonconvex optimization, aiming to improve sample complexity for finding Goldstein-stationary points in such challenging settings. Traditional DP optimization methods often assume convexity or smoothness, but this work proposes new algorithms that can handle nonsmooth nonconvex (NSNC) objectives.\n### Key Contributions\n\n1. **Single-Pass Algorithm** \n The authors present a single-pass (ε, δ)-DP algorithm that finds an (α, β)-stationary point with improved sample complexity. This algorithm reduces dimensional dependence by a factor of \\(\\Omega(\\sqrt{d})\\) over previous approaches, making DP optimization feasible in high-dimensional settings while maintaining privacy guarantees.\n\n2. 
**Multi-Pass Algorithm** \n A multi-pass ERM-based algorithm further enhances sample efficiency, allowing the algorithm to iterate over the data multiple times and achieve sublinear dimension-dependent sample complexity. This approach improves convergence while satisfying DP constraints.\n\n3. **Generalization from ERM to Population Loss** \n The authors establish that Goldstein-stationarity achieved on empirical loss also applies to the population loss with high probability. This result expands the utility of their approach by ensuring that empirical results generalize to the population.\n\n\nThe proposed algorithms make notable progress in DP optimization for NSNC problems, improving sample efficiency while maintaining privacy. This advancement is valuable for practical applications where data privacy is essential, especially in high-dimensional machine learning settings. Additionally, the generalization result strengthens the applicability of Goldstein-stationary points beyond empirical settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There is some typo, for instance, line 077 the last word should be perform.\n\n2. Randomized Smoothing is an ordinary technique used in this setting, and I wonder the novelty except for this to deal with the non-smooth setting." 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "Better algorithms for differentially private nonsmooth nonconvex optimization" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024improved,\ntitle={Improved Sample Complexity for Private Nonsmooth Nonconvex Optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0RHMnPj8no},\nnote={under review}\n}" }, "abstract": { "value": "We study differentially private (DP) optimization algorithms for stochastic and empirical\nobjectives which are neither smooth nor convex, and propose methods that return a Goldstein-stationary point with sample complexity bounds that improve on existing works.\nWe start by providing a single-pass $(\\epsilon,\\delta)$-DP algorithm that\nreturns an $(\\alpha,\\beta)$-stationary point as long as the dataset is of size $\\widetilde{\\Omega}\\left(1/\\alpha\\beta^{3}+d/\\epsilon\\alpha\\beta^{2}+d^{3/4}/\\epsilon^{1/2}\\alpha\\beta^{5/2}\\right)$,\nwhich is $\\Omega(\\sqrt{d})$ times smaller than the algorithm of \\citet{zhang2023private} for this task,\nwhere $d$ is the dimension.\nWe then provide a multi-pass polynomial time algorithm which further improves the sample complexity to $\\widetilde{\\Omega}\\left(d/\\beta^2+d^{3/4}/\\epsilon\\alpha^{1/2}\\beta^{3/2}\\right)$,\nby designing a sample efficient ERM algorithm,\nand proving that Goldstein-stationary points generalize from the empirical loss to the population loss." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Differential privacy", "nonconvex optimization", "nonsmooth optimization", "Goldstein stationarity" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/5af538bb86ccdb7a44a769f156af340e94ec1c92.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Improved Sample Complexity for Private Nonsmooth Nonconvex Optimization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0RUQmLFF1D
Is What You Ask For What You Get? Investigating Concept Associations in Text-to-Image Models
main
Active
text-to-image;vision-language;computer vision;interpretability;alignment;fairness;safety
alignment, fairness, safety, privacy, and societal considerations
3;5;6;6
3;3;4;4
2;2;3;4
2;2;3;3
2;3;4;4
5
3.5
2.75
2.5
3.25
0.816497
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could you clarify the conceptual and mathematical connection between the marginal and conditional distributions in Equation 3 and the summarization metrics in Equations 4-6? An explanation of how these are linked would help in understanding the core framework.\n\n- Since the marginal and conditional distributions are defined for continuous distributions, while the summarization metrics are based on discrete cases, could you provide a derivation or rationale that bridges these two? How do you address this foundational difference?\n\n- You mention handling uncertainty from object detectors by sampling from a distribution of concepts, but the practical details of this approach are unclear. Could you elaborate on how this sampling is implemented and how effective it is in managing detection uncertainty?\n\n- Given the similarities between concept frequency, concept stability, and concept co-occurrence and the metrics used in counterfactual explanations (e.g., validity, proximity, and diversity), could you discuss any connections or differences between your proposed metrics and those commonly used in counterfactual work?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This work addresses the important challenge of auditing text-to-image (T2I) models to assess their reliability, fairness, and bias.\n- The authors introduce an interpretation of concept distributions, which forms the basis for their marginal and conditional distribution notations.\n- Through various case studies—including bias analysis, disability representation, and model misalignment—the authors explore essential aspects of T2I model auditing." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose Concept2Concept, a framework that characterizes the conditional distributions of vision-language models using interpretable concepts and metrics. This enables systematic auditing of both models and prompt datasets. Through case studies, they analyze various prompt distributions, including user-defined and real-world examples. Concept2Concept is also an open-source interactive visualization tool, making it accessible to non-technical users." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The primary innovation of the paper lies in interpreting distributions over concepts, leading to the marginal and conditional distributions defined in Equation 3 and summarization metrics in Equations 4-6. However, the connection between these two sets of equations is not well-explained, making it difficult to understand how they are conceptually or mathematically linked.\n\n- Although marginal and conditional distributions are defined for continuous distributions, the summarization metrics—concept frequency, concept stability, and concept co-occurrence—are framed in discrete terms. 
The authors do not provide a derivation or proof to clarify the connection between continuous and discrete cases, leaving this foundational aspect unclear.\n\n- The authors mention addressing uncertainty from off-the-shelf object detectors by sampling from a distribution of concepts. However, they provide little information on the practical implementation of this approach, making it challenging to interpret how this sampling is achieved or how effective it is in managing uncertainty.\n\n- To address the uncertainty introduced by the object detector, the authors need a more comprehensive analysis, particularly in handling cases where the detector may be over-confident or under-confident. A systematic empirical study to quantify and validate this uncertainty would greatly improve clarity and demonstrate how well the framework manages these corner cases.\n\n- The metrics introduced by the authors—concept frequency, concept stability, and concept co-occurrence—resemble the validity, proximity, and diversity metrics used for counterfactual explanations as defined in [1]. However, there appears to be no discussion connecting these proposed metrics to previous work on counterfactual explanations.\n\n[1] Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "-" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- The paper is well motivated and well written\n- The proposed methods and metrics are simple and intuitive\n- It is nice that the paper reproduces the findings of prior works using a different evaluation framework\n- The paper has some important and worrying finding such as NSFW data in a human preferences dataset\n- Open sourcing such a framework would be very useful for practitioners" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work introduces a framework for auditing T2I models and datasets. The framework uses discriminative models to find objects or concepts in generated images. Using the proposed method, the findings of several works that explore the biases of T2I models can be reproduced. Furthermore," }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- My main concern is that there is an existing work [1], that has not been acknowledged. It introduces a similar method that uses discriminative models to find co-occurences and biases in the generations T2I models, somewhat limiting the contributions of this work. Nonetheless, I think the other contributions and alaysis of this paper still have merit. \n- The method section has too much fluff and introduces too many concepts that are not used later on. 
For example, the concept co-occurence formula is never being used, and the concept stability is never explored in the main part of the paper.\n- Figure 3: Methodologically, it is not clear what the prompt revision means. Are some concepts used as negative prompts?\n\n[1] OpenBias: Open-set Bias Detection in Text-to-Image Generative Models, CVPR 2024" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to the Weakness section." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper provides insightful and valuable findings concerning harmful associations present in popular datasets, offering critical observations that can guide future research and model development. The topic itself is highly relevant, and the authors’ motivation is clearly articulated, underscoring the importance of addressing these issues." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper systematically examines the associations between text prompts and generated image content in a human-understandable way. Its goal is to audit text-to-image models to ensure they produce desirable and task-appropriate images. 
The authors propose a framework called Concept2Concept that 1) extracts high-level concepts from generated images and 2) calculates concept distribution to uncover associations between prompts and generated images. Using this framework, the authors have identified potentially harmful associations in popular datasets like Pick-a-Pic." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "One limitation of this paper is that the overall framework still relies on human examination and investigation, which may impact its scalability. \n\nThe technical and theoretical contributions are fair but could be strengthened. Further elaboration on the differences from existing work would help to clarify the novelty of this framework. As it stands, the paper resembles more of a technical application report than a traditional academic paper. To demonstrate the framework’s utility, the authors present five case studies that effectively showcase its application; however, they lack cross-model analysis, which would add depth to the evaluation. Using concepts as tools to analyze bias in text-to-image (T2I) models holds strong potential, and it would be beneficial for the analysis to extend into other domains, such as ethnicity, offering a more comprehensive evaluation across multiple models and datasets. The current five case studies, though useful, may fall short of meeting the quality criteria expected in a top conference.\n\nBesides, why there is no information about the used T2I model in the main paper? and in the appendix, there is no discussion about the choice of the model and no discussion about the different comparisons of different models \n\nAdditionally, there are minor typos (e.g., in Line 236, Figure 2) that could benefit from correction." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See weaknesses W1 to W3." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper is very well written and easy to follow. The authors handle the sensitive topics with the appropriate sense of responsibility.\n- The selected applications of the method as well as the results are very interesting and I hope will spark a discussion in the communities using the respective datasets. \n- The presented framework increases evaluation robustness due to the three presented metrics, evaluating the relationship between prompts and generated images from different perspectives." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Concept2Concept, a framework for auditing text-to-image models by analyzing the associations between generated images and prompts using interpretable concepts. It helps uncover biases, harmful content, and unexpected associations in models and datasets, demonstrated through various case studies." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**W1:** To my knowledge, there is no anonymous code repository provided with the paper, neither for the experiments nor the tool. As a result, I am unable to comment on the tool's usefulness. It would be beneficial if the experiments could be replicated either within the tool or through a dedicated repository to also validate the correctness of the results.
\nThe selection and robustness of the results regarding the T2I and VLM detector models are in my opinion just weakly addressed: \n\n**W2:** Only in the Appendix, it is revealed that the audited T2I model is Stable Diffusion 2.1. This information should be part of the main manuscript as the results of Study 4 only apply to this model architecture. It would be interesting how results would change for other model architectures, as especially closed-source models are strongly safety fine-tuned. If I understand correctly your framework is model-agnostic and could also be applied to closed-source models accessed via API calls. Did you perform any experiments with other model architectures? And if not please argue in the manuscript why you restrict Application 1 to this specific model architecture.\n\n**W3:** While the authors acknowledge that the detection model introduces uncertainty in the extracted concepts (Line 132), they do not address how sensitive the application results are to the choice of the detector model. Could specific concepts be overlooked if a different grounding model is used? Additionally, how does the safety fine-tuning of the detection model potentially conflict with the task of identifying sensitive concepts, such as in CSAM?\n\nI am open to raising my score if the identified weaknesses are either adequately addressed through revisions to the manuscript or convincingly argued to be non-issues.\n\nComments:\n- There is a closed bracket missing in line 187 „P(c“\n- There is a closed bracket too much in line 236 „Figure 2)“." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024is,\ntitle={Is What You Ask For What You Get? 
Investigating Concept Associations in Text-to-Image Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0RUQmLFF1D},\nnote={under review}\n}" }, "abstract": { "value": "Text-to-image (T2I) models are increasingly used in impactful real-life applications. As such, there is a growing need to audit these models to ensure that they generate desirable, task-appropriate images. However, systematically inspecting the associations between prompts and generated content in a human-understandable way remains challenging. To address this, we propose Concept2Concept, a framework where we characterize conditional distributions of vision language models using interpretable concepts and metrics that can be defined in terms of these concepts. This characterization allows us to use our framework to audit models and prompt-datasets. To demonstrate, we investigate several case studies of conditional distributions of prompts, such as user defined distributions or empirical, real world distributions. Lastly, we implement Concept2Concept as an open-source interactive visualization tool facilitating use by non-technical end-users. *Warning: This paper contains discussions of harmful content, including child sexual abuse material and NSFW\nmaterial, which may be disturbing to some readers." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "text-to-image", "vision-language", "computer vision", "interpretability", "alignment", "fairness", "safety" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/2559d4dd17a6377a301d588720f9cf368a35724c.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Is What You Ask For What You Get? Investigating Concept Associations in Text-to-Image Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0Ra0E43kK0
CaLMol: Disentangled Causal Graph LLM for Molecular Relational Learning
main
Active
Molecular Relational Learning;Large language Model;Graph Neural Network;Causal Learning
applications to physical sciences (physics, chemistry, biology, etc.)
3;3;3;6
4;5;4;4
3;3;2;3
2;3;2;2
2;3;2;3
3.75
4.25
2.75
2.25
2.5
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could the authors provide additional analysis on the computational complexity of CaLMol? How about the comparison with these baselines in training time and inference time?\n- More detail about interpretability cases and analysis should be provided to support the advantage of CaLMol.\n- In Table 1, it is evident that the three datasets for DDI classification present a highly imbalanced binary classification task; however, the results shown for CaLMol in Table 2 perform poorly on AUC-ROC, which is a crucial metric for imbalanced data.\n- Given the model’s dependency on selected datasets, how would the authors suggest extending the approach to larger and more diverse datasets? For example, Drug-Target Interaction (DTI) is also a significant task in drug discovery; demonstrating that CaLMol is useful in this task would enhance its practical significance." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This paper combines causal disentanglement and semantic alignment between GNN and LLM, allowing for a comprehensive understanding of molecular interactions.\n- By targeting unseen molecules, CaLMol addresses an important area in MRL, providing potential for applications involving new drugs or compounds.\n- The model is evaluated across multiple datasets, showing improvements in accuracy over several baselines, which demonstrates its effectiveness in specific zero-shot tasks.\n- The paper is well-written and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents CaLMol, a model for molecular relational learning (MRL) that uses a combination of Graph Neural Networks (GNNs) and Large Language Models (LLMs) to predict drug-drug (DDI) and solute-solvent (SSI) interactions in a zero-shot setting. The model’s innovative approach in leveraging causal disentanglement and aligning molecular structures with semantic information provides a promising direction." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "See Questions." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "As stated in the Weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This work presents CalMol, a molecular relationship learning framework based on large models and disentanglement, which achieved comparative performance on the DDI task and notable performance on the SSI task. Extracting the causal substructures of molecules is an interesting topic." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work presents CalMol, a molecular relationship learning framework based on large models and disentanglement. CalMol consists of two main parts: a causal substructure extraction module and a multimodal large model fusion module. The causal substructure extraction module learns the core substructures of molecules by decomposing the target molecule and studying the substructures in contact between pairs of molecules. The multimodal large model fusion module integrates natural language instructions with SMILES and graphical representations of molecules and core substructures into LLM for downstream tasks by constructing prompts. This work is based on MolTC, with the addition of a causal substructure extraction module. The authors evaluated CalMol on DDI (drug-drug interaction) and SSI (solute-solvent interaction) tasks, where CalMol achieved comparative performance on the DDI task and notable performance on the SSI task." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The authors believe that existing methods rely on \"variant molecular structures\", which hinders their performance, but there is a lack of a clear definition of \"variant molecular structures\".\n2. For a molecule, the substructures that play a key role may vary when it binds with different molecules, i.e., the so-called core substructures are not fixed. Therefore, it is not rigorous enough to determine the core substructures of a molecule with just one set of relationships.\n3. Using a substructure of a molecule as its causal substructure is somewhat far-fetched, especially for larger molecules.\n4. The supervision signal and loss function used in the substructure learning stage are unclear.\n5. The authors propose to make the disentangled spurious part S approach a random distribution, but the rationale for doing so is not explained.\n6. There is a lack of necessary ablation experiments, such as whether the disentanglement module is effective and whether the several disentanglement losses are necessary." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Please provide specific examples of existing MRL methods that make this assumption about molecular distributions, or clarify precisely what is meant by \"molecular distributions\" in this context. Are the authors referring to \"element distribution\" or \"atom distribution\"? 
Providing this clarification will help address the concern more directly and substantiate the authors' claims.\n\n\n2. The model input includes both the molecular graph information and the SMILES representation; it seems an additional ablation study is needed to demonstrate the effectiveness of both modalities like MolCA .\n\n\n3. After obtaining the substructure based on causal theory, why is it necessary to input it into a large language model rather than making a direct prediction? Does this approach truly improve the final predictive results? Furthermore, while the manuscript mentions that llm could enhance interpretability, I could not find any experiments or examples to support this claim.\n\n\n4. With the introduction of a LLM, the model's complexity and resource consumption should be compared with that of conventional models to verify the necessity of incorporating LLMs, allowing for a more comprehensive evaluation.\n\n\n5. More llm-based model are needed as baseline to verify CALMOL's performance.\n\n\n\n[1] MolTC: Towards Molecular Relational Modeling In Language Models;\n\n[2] MolCA: Molecular Graph-Language Modeling with Cross-Modal Projector and Uni-Modal Adapter" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The starting point of this paper is interesting; exploring causal substructures with large models is indeed an engaging and meaningful topic.\n2. Generalization and Robustness: By leveraging invariant relationships across molecular structures and text, CALMOL effectively addresses distribution shifts between known and new drugs, thus enhancing generalization to unseen molecules. CALMOL maintains consistent performance across various dataset splits (Section 4.1)." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces CALMOL, a causally disentangled invariant graph large language model (LLM) tailored for molecular relational learning (MRL), with a particular focus on zero-shot scenarios requiring predictions of new molecular interactions. By integrating Graph Neural Networks (GNNs) with LLMs, CALMOL captures causal structural relationships and aligns molecular structures with semantic information, thereby improving predictions in drug design and molecular interaction studies. Overall, this paper is highly intriguing and meaningful, but there are several issues that require attention." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Assumption on Molecular Distributions**: The paper claims that most existing MRL methods assume the same molecular distributions. However, I rarely encounter papers that explicitly make assumptions about molecular distributions, and the term \"molecular distributions\" is somewhat ambiguous, requiring further clarification. To substantiate this claim, I would recommend that the authors provide specific examples of existing MRL methods that make this assumption or clarify precisely what they mean by \"molecular distributions\" in this context.\n\n2. **Effectiveness of Molecular Feature Extraction**: The model only uses SMILES information during the modality alignment process, yet SMILES is also provided in the input. This raises questions about the effectiveness and actual contribution of molecular graph feature extraction. I suggest the authors clarify the role and contribution of molecular graph feature extraction in their model, given that SMILES information is used in multiple stages. An ablation study or analysis showing the added value of graph feature extraction over using SMILES alone would be helpful in addressing this concern.\n\n3. 
**Novelty of the Method**: The method’s novelty is questionable; the paper seems to merely link motif sets’ causal motif extraction with LLMs in a fairly straightforward manner, without a clear motivation. Additionally, the paper claims that the LLM provides further interpretability, yet no relevant case study is provided in the experimental section to support this. I suggest that the authors provide a more detailed comparison with existing methods that combine causal motif extraction and LLMs, highlighting any specific innovations in their approach. Including a case study or examples demonstrating the enhanced interpretability claimed for their LLM-based approach would strengthen the paper.\n\n4. **Interpretability Challenges**: While CALMOL offers causal substructure explanations, the interpretability of predictions could be improved. Providing more detailed analyses or visual examples would better illustrate how causal substructure disentanglement directly impacts interaction predictions (Section 3.1). This could offer greater clarity on the added interpretability benefits of the model.\n\n5. **Dependency on LLMs**: Due to computational demands, CALMOL’s reliance on large language models may limit its applicability in resource-constrained environments. Furthermore, the paper does not clearly demonstrate any significant advantage of LLMs in this domain. I suggest the authors provide a more detailed discussion of the computational requirements of their model, ideally comparing performance versus computational cost with non-LLM methods. Specific examples or analyses that demonstrate the unique advantages that LLMs bring to molecular relational learning tasks would also help to substantiate this aspect." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "see the weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The topic is valuable and interesting. Introducing functional substructures based on LLM makes it intuitive to predict potential molecular interactions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a method to keep the invariant between molecular structures and semantic texts under a zero-shot scenario. The topic is interesting, and the experimental results look positive. Unfortunately, the paper is vague and lacks clarity both in the description of the technical approach and in the construction of the proposed datasets used for training." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. How to introduce supervised signals to optimize the weights between motifs from different molecules is confusing, and it is suggested that the authors provide more details to clarify the principles of calculating the weights between motifs, and what the symbol \\hat{Y}_C, \\hat{Y}_S, \\hat{Y} stand for.\n\n2. 
The core idea of CalMol is similar to MolTC[1], the authors should clarify the key difference between them.\n\n3. The ablation study is limited, the authors should further discuss the contribution of the LLM backbone. Besides, the contribution of casual GNN is weak in the DDI prediction task, but it shows strong promotion on SSI prediction, the authors can discuss this phenomenon.\n\n[1] Fang, J., Zhang, S., Wu, C., Yang, Z., Liu, Z., Li, S., ... & Wang, X. (2024). Moltc: Towards molecular relational modeling in language models. arXiv preprint arXiv:2402.03781." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024calmol,\ntitle={Ca{LM}ol: Disentangled Causal Graph {LLM} for Molecular Relational Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0Ra0E43kK0},\nnote={under review}\n}" }, "abstract": { "value": "Molecular Relational Learning (MRL), focused on understanding interactions between molecular pairs, is essential for drug design by utilizing both structural properties and textual knowledge, such as expert documents. However, most existing MRL methods assume static molecular distributions, meaning the distributions remain consistent across training and testing stages. This assumption may lead to the exploitation of variant correlations between structures and texts regarding interactions, thereby failing in the ubiquitous scenarios involving new drug predictions. 
To bridge this gap, we investigate zero-shot MRL by leveraging invariant relationships between molecular texts and structures w.r.t interactions for new molecules, which is largely unexplored in the literature and is highly non-trivial with following challenges: 1) How to disentangle molecular structure components between each pair to intrinsically determine interactions and address potential structural distribution shift issues for new drugs? 2) How to align molecular structures with semantic textual information to achieve invariant molecular relation predictions for new drugs? To tackle these challenges, we propose a novel Causally Disentangled Invariant Graph Large Language Model (LLM) for Molecular Relational Learning (CaLMol), capable of exploiting invariant molecular relationships to predict interactions for new drugs. Specifically, we propose Causal Molecule Substructure Disentanglement to capture the invariant well-recognized substructure pair for a specific molecule interaction. Then, we propose Molecule Structure and Property aware LLM Alignment to use molecule (with invariant substructure)-textual property pair to align structure information to semantic information, and use them together to guide the interaction prediction. On this basis, LLM can also provide further explanations.\nExtensive experiments on qualitative and quantitative tasks including 8 datasets demonstrate that our proposed CaLMol achieves advanced performance on predicting molecule interactions involving new molecules." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Molecular Relational Learning", "Large language Model", "Graph Neural Network", "Causal Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/785da5d363c85fc8f02dd33b6312f6ea3ae4e440.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/b5583abb6f5caca20b32a8131ee5e3d6522e8910.pdf" }, "title": { "value": "CaLMol: Disentangled Causal Graph LLM for Molecular Relational Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0RgLIMh94b
Diffusion Curriculum: Synthetic-to-Real Data Curriculum via Image-Guided Diffusion
main
Active
Synthetic data;Curriculum Learning;Diffusion Models
generative models
3;3;5;5
4;4;4;4
2;2;3;3
2;2;3;2
2;2;3;3
4
4
2.5
2.25
2.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please respond to the weaknesses listed. Given the many clarification required I consider this work below the acceptance bar." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The method is simple and seems to work for both long-tailed and low-quality image classification." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a curriculum learning (CL) method that leverage synthetic image generation as data augmentation in combination with CL algorithm to tackle data-challenging tasks, e.g., long-tailed and low-quality distribution learning. The proposed method, DisCL, generattes images using both text and real images conditioning, with various image guidance scales to regulate the similarity to the real image, allowing for control over the hardness/complexity of the generated sample. CL is applied to select which complexity of samples (image guidance scale) to use based on the task at hand, e.g., for long tail learning an diverse-to-specific CL algorithm is used, while for low-quality image learning an adaptive algorithm is used. 
A first set of experiments compare DisCL versus baselines using data augmentation or balanced softmax for long tailed classification, showing positive impact mostly on less represented classes. A second set of experiments, test DisCL in the task of low-quality images using the iWildCam dataset. Here, DisCL is plugged to state-of-the-art fine-tuning techniques to show improved performance on both out-of-distribution and in-distribution examples." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Lacking of some relevant related works. The method does not mention nor compare against methods that already tried to leverage synthetic data as data augmentation to cope with unbalanced data, e.g., Hemmat-Askari et al 2023, or for representation learning / classification e.g., Tian et al 2024a and 2024b, Astolfi et al 2023. In particular, Hemmat-Askari et al 2023 seems quite related as they target the same task and use similar synthetic data generation approach while having some sort of adaptive curriculum learning (feedback guidance) which regulates the type of generation needed by the model. It would be nice to understands how DisCL compare against it. Finally, ALIA ( Dunlap et al 2023) is mentioned as a related work and a baseline in 3.1.2, but never presents in the results.\n\n2. Weak / unclear experimental settings:\n - Resnet-10 choice is motivated for the comparison with LDMLR; However, most of the comparisons are with CUDA, which uses resent-32 for CIFAR-100 and resenet-50 for ImageNet. Do you expect you results to hold with these larger resnet?\n - Some experimental details are unclear to me. \n - it is not clear to me whether baselines and DisCL are trained for the same amount of iterations/epochs. 
\n - In the training details the authors say: _\"To preserve a constant imbalance-ratio throughout all training stages and experiments, we undersample the non-tail samples at \"each stage\" so that ratio of tail-samples to non-tail samples matches the proportion of tail classes to non-tail classes present in the original data (13.6%).\"_. If I am reading this correctly the authors say that they prefer to maintain imbalanced the dataset, despite having the possibility to rebalancing it with synthetic data. Why this choice?\n - The results on ImageNet-LT show small improvements w.r.t. to balanced softmax (BS) baseline (+1.5%). By looking at Hemmat-Askari et al 2023 results, the BS baseline is outperformed by a large marging. I understand that the number of generated data on Hemmat-Askari et al is on another scale (1.3M vs. 25K). Do you think the scale is enough to justify this difference? \n - Also, combining BS with DisCL sometimes leads to lower results than CE + DisCL (see Table 2). Is there any intuition why BS does seem to be as effective for DisCL\n - Bolding in table 2 is inconsistent\n\n\n*_Tian, Yonglong, et al. \"Stablerep: Synthetic images from text-to-image models make strong visual representation learners.\" Advances in Neural Information Processing Systems 36 (2024)._\n* _Tian, Yonglong, et al. \"Learning vision from models rivals learning vision from data.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024._\n* _Hemmat, Reyhane Askari, et al. \"Feedback-guided data synthesis for imbalanced classification.\" arXiv preprint arXiv:2310.00158 (2023)._\n* _Astolfi, Pietro, et al. \"Instance-conditioned gan data augmentation for representation learning.\" arXiv preprint arXiv:2303.09677 (2023)._\n* _Dunlap, Lisa, et al. 
\"Diversify your vision datasets with automatic diffusion-based augmentation.\" Advances in neural information processing systems 36 (2023): 79024-79034._" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In the Table 2 and Table 3, can you provide the results of CE + Text-only Guidance and CE + All-Level Guidance." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The experiment is well designed.\n2. The visualization is good.\n3. A substantial improvement has been achieved for some tasks.\n4. Combine the curriculum learning into generative data augmentation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper tries to incorporate the curriculum learning technique into image data augmentation. \n\nThis paper evaluated the proposed method on two tasks: long-tail classification and image classification with low-quality data to show the effectiveness.\n\nContribution: This paper reveals that curriculum learning is a way to balance synthetic data with various quality and real data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
In line 87, \"We harness image guidance in diffusion models to create a spectrum of synthetic-to-real data\". I don't think this is a contribution of yours. In ICLR 2023, one paper called \"IS SYNTHETIC DATA FROM GENERATIVE MODELS READY FOR IMAGE RECOGNITION?\" has already proposed to leverage both image and text guidance for data augmentation, and there are a lot of following works.\n\n2. In the method part, most words are recalling the diffusion theory and image-text guidance, which are both not your contribution. I think the main contribution is how to leverage the various quality data with curriculum learning. However, the Sec. 3.2 is quite short and simple.\n\n3. In the ablation part of Table 1, compared with CE + Text-only Guidance (39.10% overall accuracy) and All-Level Guidance (39.40% overall accuracy), the CE + DisCL gets very limited improvement." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Are ‘diverse to specific’ and ‘easy to hard’ curriculum strategies the same? If so, why are they called differently?\n- In Table 1, for the “Few” class, the impact of DisCL is more significant when using Cross Entropy compared to when Balanced Softmax is used. Why?\n- One of the benchmarks for learning from low-quality data is ALIA. Which Table contains the results with ALIA?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The authors address the gap between real and synthetic data generated using diffusion models by designing a generative curriculum that can adjust the quality, diversity, and difficulty of the data for different training stages. This provides a new perspective on generative data augmentation, given that the majority of prior work considered a fixed image guidance scale throughout the training.\n- The effectiveness of generative data augmentation strategies primarily depends on the performance of the generative model. To address the potential shortcomings of the generative models and thereby improve the effectiveness of the data augmentation, the authors proposed using CLIPScorer to filter out low-fidelity images." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Training deep learning models with low-quality or limited amounts of data often results in overfitting or suboptimal performance. To overcome this challenge, data augmentations have been an integral part of training deep learning models. However, classical data augmentations offer limited diversity and may also result in out-of-distribution samples, hampering the performance of the model. Therefore, recent research has focused on using generative models for data augmentations. Building in this direction, the authors propose a method to create a spectrum of interpolations between synthetic and real images called Diffusion Curriculum (DisCL). Focusing on the long-tail classification and learning from low-quality data tasks, the author demonstrates the efficacy of DisCL." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper is difficult to follow. 
This is partly because key details are missing in the main paper. For instance, in Section 4.1, the authors mention the use of a set of diverse textual prompts, while the details are deferred to the appendix. Another instance is in Section 4.2, where the authors mention that inspired by DoCL, they propose an adaptive curriculum. However, there is no discussion of the proposed adaptive curriculum in that section.\n- The concept that the choice of the starting timestep $t$ controls the impact of $z_{real}$ has been extensively studied in prior works, notable being SDEdit [1]. Therefore, it would be easier for the readers to follow if the authors cite the existing works and explain the similarities.\n- Using generative models for data augmentation has been an active area of research, with many approaches proposed in the literature [2,3,4,5]. The authors can compare their approach with these existing works to substantiate their novelty and demonstrate the impact of using a pre-defined or adaptive generative curriculum. The current evaluation is limited.\n\nReferences:\n- Meng, Chenlin, et al. \"Sdedit: Guided image synthesis and editing with stochastic differential equations.\" arXiv preprint arXiv:2108.01073 (2021).\n- Roy, Aniket, et al. \"Cap2aug: Caption guided image to image data augmentation.\" arXiv preprint arXiv:2212.05404 (2022).\n- Luzi, Lorenzo, et al. \"Boomerang: Local sampling on image manifolds using diffusion models.\" arXiv preprint arXiv:2210.12100 (2022).\n- Koohpayegani, Soroush Abbasi, et al. \"GeNIe: Generative Hard Negative Images Through Diffusion.\" arXiv preprint arXiv:2312.02548 (2023).\n- Trabucco, Brandon, et al. \"Effective data augmentation with diffusion models.\" arXiv preprint arXiv:2302.07944 (2023)." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I am concerned about using only the pre-trained stable diffusion model without further fine-tuning (I am happy to update the rating if the following questions are addressed).\n1. Were there any observed biases/failures in the synthetic data generated for small resolution datasets (For example CIFAR100-LT)? I am concerned because of the resolution differences between the CIFAR100-LT dataset and the resolution stable diffusion model is trained on.\n2. In real-life scenarios, some datasets we want to train the models are not real photographs, do you think a pre-trained diffusion model and your proposed method can be effective for Long-Tailed or low-quality datasets in domains of Comics, Drawings, etc?\n3. Using Clip score as a threshold to filter out generated images is reasonable but for some classes, it can be easy to filter out too many generated data from pretrained stable diffusion (therefore not a full spectrum of data can be generated and saved). May I ask if this situation also occurs in your experiments and could you provide the percentage of filtered images and any strategies you employed to ensure sufficient data?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The idea of adjusting guidance scales to obtain a greater variety and quality of training data augmentation is interesting and novel.\n2. Generative Curriculum Learning is reasonable and can adjust for different tasks.\n2. The proposed method proves its effectiveness in both experiments in long-tail classification and learning from low-quality data." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the challenge from training computer vision models (image classification task) with low-quality or scarce data. The paper proposes Diffusion Curriculum (DisCL) which leverages diffusion models to synthesize hard image examples data with different guidance scales and then utilizes a Generative Curriculum Learning to select appropriate synthetic data from the full spectrum of generated data for training data augmentation. Experiments are conducted on two tasks: long-tail classification and learning from low-quality data, to show the method's effectiveness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. A tradeoff is the speed of generating a full spectrum for images. For large datasets, the diffusion models can consume a long time to generate a full spectrum of needed images." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a curricula on image guidance level to improve learning on the low-quality data and long-tail classification." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024diffusion,\ntitle={Diffusion Curriculum: Synthetic-to-Real Data Curriculum via Image-Guided Diffusion},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0RgLIMh94b},\nnote={under review}\n}" }, "abstract": { "value": "Low-quality or scarce data has posed significant challenges for training deep neural networks in practice. While classical data augmentation cannot contribute very different new data, diffusion models opens up a new door to build self-evolving AI by generating high-quality and diverse synthetic data through text-guided prompts. However, text-only guidance cannot control synthetic images' proximity to the original images, resulting in out-of-distribution data detrimental to the model performance. To overcome the limitation, we study image guidance to achieve a spectrum of interpolations between synthetic and real images. With stronger image guidance, the generated images are similar to the training data but hard to learn. While with weaker image guidance, the synthetic images will be easier for model but contribute to a larger distribution gap with the original data. The generated full spectrum of data enables us to build a novel \"Diffusion CurricuLum (DisCL)\". DisCL adjusts the image guidance level of image synthesis for each training stage: It identifies and focuses on hard samples for the model and assesses the most effective guidance level of synthetic images to improve hard data learning. We apply DisCL to two challenging tasks: long-tail (LT) classification and learning from low-quality data. It focuses on lower-guidance images of high-quality to learn prototypical features as a warm-up of learning higher-guidance images that might be weak on diversity or quality. 
Extensive experiments showcase a gain of 2.7$\\%$ and 2.1$\\%$ in OOD and ID macro-accuracy when applying DisCL to iWildCam dataset. On ImageNet-LT, DisCL improves the base model's tail-class accuracy from 4.4$\\%$ to 23.64$\\%$ and leads to a 4.02$\\%$ improvement in all-class accuracy." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Synthetic data", "Curriculum Learning", "Diffusion Models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4aa9ac43dfde44c016bf8d99be2d5195cfc6d553.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Diffusion Curriculum: Synthetic-to-Real Data Curriculum via Image-Guided Diffusion" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0SpkBUPjL3
Unremovable Watermarks for Open-Source Language Models
main
Active
watermark;large language model
alignment, fairness, safety, privacy, and societal considerations
3;3;3;5
3;4;5;3
2;2;2;2
1;2;1;2
4;2;1;2
3.5
3.75
2
1.5
2.25
-0.522233
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- In Theorem 1 shouldn't the distribution over $w^\\star$ be centered at $w_{wat}$ and not 0? This is also present in the proof and Theorem 3. Is this a mistake or my misunderstanding of the statements?\n- L145 states that Aaronson (2022), Christ (2024) and Fairoze (2023) are based on partitioning the tokens into red and green lists. Can you elaborate on this view of these methods, as my understanding was that they are quite different and do not use the red/green concept? \n- Were OPT models modified to use the final layer bias to enable experimental evaluation?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- As the authors recognize, watermarking of open-source LLMs is one of the most important open problems in current generative model watermarking research, so studying it has the potential for high impact." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper focuses on the problem of LLM watermarking and proposes a scheme applicable to open-source models. The key claimed advantage of the scheme is its provable unremovability which the authors rigorously derive. 
Experimental results on two OPT models are shown, including the evaluation of robustness to token substitution and Gaussian perturbations to model weights." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Unfortunately I believe the paper in its current state is far from being able to deliver that impact. Namely: \n- While I agree that some definition must exist, formalizing \"LLM that produces high-quality text\" as \"closeness to the original LLM in weights of the last bias layer\" seems arbitrary and far from realistic notions of quality. This greatly simplifies the problem, unfortunately making the theoretical results (claimed key contribution) largely not relevant. While I appreciate the rigor and work authors have put in proving the results, formalizing the intuition that a random vector in N-dimensional space is unlikely to match a particular unknown direction, I unfortunately do not think this provides any valuable insight in terms of the robustness of an OSS watermark to realistic transformations.\n- Given this, the blanket claims that the watermark is \"unremovable\" (title, abstract, introduction) render as dangerous overclaims that may cause confusion in the field if the paper is accepted. These should be greatly adjusted and qualified to explain the peculiar definition of quality. To actually get a meaningful notion of unremovability, the authors could consider realistic transformations commonly applied to OSS models such as finetuning, PEFT, quantization, pruning, or at least random modification of all weights (the argument on L129/130 is unclear). These are currently neither discussed in theory nor included in the evaluation. Interestingly, the authors recognize that prior work Gu et al. (2024) considers fine-tuning yet do not consider this themselves. \n- As the authors also recognize, the proposed scheme is a variant of UnigramWatermark. 
While scheme simplicity is not a weakness per se, interesting/novel technical aspects of the proposed scheme are also not a strength of this paper. This is further harmed by the fact that popular LLMs often do not use the final layer bias, making the proposed scheme inapplicable. In fact, this is true for OPT models used in this work (https://github.com/huggingface/transformers/blob/v4.46.0/src/transformers/models/opt/modeling_opt.py#L1052), bringing into question the current evaluation. \n- LLM watermarking, which this paper positions itself as part of, generally focuses on detecting LLM-generated outputs. Yet, this paper starts from the related but different notion of detecting that a model was based on a watermarked model from its weights, and prove key results in this case. This is a new scenario which is unexplained and unmotivated, should be explicitly separated from the common understanding of LLM watermarking promised in early parts of the paper, and raises many questions. For example, if we assume transformations of our OSS model change nothing but the final bias layer, can't we use the other (unchanged) weights to demonstrate that the resulting model was made from our model?\n- Evaluation has many drawbacks, among else it does not include any baseline (such as Gu et al. (2024)), uses high FPRs, and uses no realistic attacks on text such as paraphrasing, generally used in prior work. As the authors note, the performance of the watermark is below non-OSS baselines, which is to be expected, but does not present a case for this method as useful beyond the OSS case.\n- The paper is written and presented in a confusing and convoluted way, seems to be written in a rush, and it is often very hard to understand the key parts. I include some examples/suggestions below, in hopes that this helps the authors get insight into the issues and improve their writing in the future to the level expected at ICLR. 
I am happy to further assist the authors here if they have questions. \n- (Minor) While this does not affect my assessment, L173 contains another dangerous claim, citing Zhang et al. (2024) to say that any LLM watermark is removable. This is a misunderstanding of the original paper which studies an idealized case where a random walk on the space of \"equivalent\" documents is possible while preserving quality, and the random walk is rapidly mixing. To avoid misinforming readers, this citation should be appropriately qualified.\n\nOverall, while I do appreciate the authors tackling such a hard and important problem, I do not see the contribution of the paper at this point, and believe it thus to be clearly below the bar for acceptance.\n\n---\n\nThe list of writing/formatting/presentation comments for the authors follows, which I hope they find helpful. I do not expect authors to reply to each point, although I should be corrected if I misinterpreted some points.\n- L53: the phrase \"unremovability from a modest amount of text\" is confusing and should be made more precise\n- L54-60 seems to repeat the same point about the adversary twice, requiring several readings \n- I appreciate the inclusion of the Overview section; however, instead of previewing and summarizing the technical details, this section is often the only place where concepts are explained in terms of the concrete instantiation of interest (LLMs). E.g., 4.1. does not reflect on what unremovability means in the setting we consider, but only provides abstract definitions. This makes the paper hard to read and understand the actual instantiation. \n- Another choice that contributes to this is the use of \"content\" to counterintuitively often mean \"last layer bias vector\" instead of \"text\". Similarly in Alg. 3 it is not made clear if \"original content\" refers to the watermarked or pre-watermarked model weights; effort by the reader is needed to understand this. \n- Sec. 
2 uses (M*, M') for (original, watermarked) model, inconsistent with ($w^\\star, w_{wat}$) below, causing some confusion. \n- L87: \"checking the correlation\" is quite imprecise\n- L104: why the region is a halfspace is not explained; while it is a simple property of dot product, this should be made explicit to help readers grep this part \n- L107: \"add to $w^*$\" is unclear. I suspect this should say \"the adversary can add a vector to $w_{wat}$\" instead; this should be made precise.\n- L230: Q should probably be L? Such mistakes should be especially avoided in key definitions.\n- L231: \"p.p.t.\" should be defined, I believe it is not a standard term in this community \n- L318: logits $l_i$ seem to refer to values before adding any bias? This is very ambiguous and should be made clear in the writing.\n- \"Quality score\" shows up first in Fig. 3 but is not previously introduced which is quite confusing.\n- The paper has no figures before the evaluation which is highly unusual, especially as there are many instances where a visualization would greatly aid understanding (e.g., halfspaces / gaussians in the model parameter space). I suggest the authors generally consider this when writing.\n- The margins on top of each page are very small which suggests the style file was tweaked. Note that the ICLR formatting instructions state \"Tweaking the style files may be grounds for rejection.\". While the paper doesn't seem to have been desk rejected in this instance, I strongly suggest the authors remedy this and follow the instructions." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What is the relationship between Algorithm 4 and Algorithm 3? If Algorithm 4 is the primary method for text watermark detection, then when is Algorithm 3 invoked?\n\n2. How is the symbol \\Delta (x_i) defined in Algorithm 4? How is it calculated? \n\n3. In watermark detection, each token should be evaluated. Why is it necessary to check x_i \\in S in line 4 of Algorithm 4?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors attempt to embed noise information following a normal distribution as a watermark into the text output of the model. This is an interesting endeavor that could potentially aid future watermark embedding algorithms.\n\n2.The paper attempts to theoretically discuss the unremovability of watermarks, which is also an interesting analysis." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a method for embedding watermarks in large language models (LLMs). This method incorporates watermark information by adding noise that follows a normal distribution to the model's output, with the noise serving as the watermark's key. The authors also demonstrate that, under certain assumptions, the embedded watermark is unremovable. The feasibility of the proposed scheme is validated using the OPT-6.7B and OPT-1.3B models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper's description of the algorithm is not clear enough and does not reflect the specific implementation of the watermark embedding algorithm. 
There is watermark detection for text in Algorithm 4, but there is no embedding algorithm for text.\n\n2. The paper discusses the unremovability of watermarks, which is generally referred to as robustness in other papers. The paper does not compare the robustness of its approach with those of other papers. It should also discuss the soundness property of the watermark, which typically contradicts robustness.\n\n3. The writing in the paper is not clear enough, which makes it difficult to understand the algorithms it contains. Specific issues will be provided below." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Please answer those in weaknesses above." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper introduces a new watermarking scheme to embed unremovable watermarks directly in model weights and resist tampering in open-source environments.\n- The paper defines \"unremovable watermarks,\" providing proofs and an analysis of the watermark’s robustness against attacks and conducting experiments with OPT-6.7B and OPT-1.3B models to demonstrate the approach's effectiveness.\n- The paper is well-structured, logically presenting its motivation, methodology, and findings, with clear definitions and algorithms. I highly commend the authors for the nice presentation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces the first watermarking scheme for open-source LLMs. The idea is to embed a watermark directly in the model's parameters rather than through the sampling algorithm, making it resilient to tampering in open-source environments. The authors define \"unremovable watermarks\" for neural networks and implement a scheme that perturbs neuron biases in the model's final layer with Gaussian noise. Detection of the watermark is achieved either by examining the weights for specific bias patterns or by analyzing output text for token frequency markers. The watermark is shown to be unremovable, as attempts to erase it degrade model quality, with experimental results on OPT-6.7B and OPT-1.3B supporting this claim." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Now, the authors claim to have introduced the first watermarking scheme for open-source LLMs. What do they mean by this? 
There are many watermarking schemes which could be deployed in open source LLMs, so this claim might not be right, as the proposed scheme can also be deployed in closed source LLMs by the model owners. Which leads to the next question. If the LLM is open source, what exactly is the benefit of watermarking when the attacker has direct access to model's weights. Can the authors expand more on their motivation?\n- The proposed approach embeds watermark signals to the bias of the last layer's neurons. There is another approach by ByteDance that injects watermark into the LLM weights by finetuning (https://arxiv.org/pdf/2403.10553). Why is there no comparison with this approach? In fact, why is there no comparison with other watermarking schemes at all?\n- There are adaptive ways to bypass watermarks. One is by using adaptive paraphrasers. If the proposed watermark scheme is unremovable, yet detectable, why are there no empirical results proving the 'unremovability' claim using adaptive paraphrasers, or even normal paraphrasers like Dipper, or even using open source LLMs for paraphrasing.\n- How efficient is the detection process? How many tokens does it require to detect the proposed scheme, especially using its optimal hyperparameters? I feel the experiments the authors provided to prove the efficiency and strength of this approach are not enough." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Can you address the two issues of robustness (to small change in the output) and the undetectability of the output (compared to the non-watermarked model) ?\n\n In your experiments, how do you measure that the utility of the model has not degraded after adding the watermark. I know that you have an oracle that measures the degrading, but then you instantiate the oracle using mathematical formulas regarding the model. But how do you make sure that this reflects the actual quality of the model’s output? For example, you could use specific metrics or human evaluation methods to assess output quality more directly.\n\nCan you discuss why the assumptions are fine? There are 3 explicit assumptions and (multiple) implicit assumptions in the statement of Theorem 1 (eg., “let C be such that…” or “c_2-high quality…”) I think that discussion is needed before calling assumptions reasonable (instead of putting the word reasonable in the theorem statement).\n\nCan you argue either way about the effect of fine tuning in your watermarked model?\n\nIn your experiments: can you be more explicit about what your attacker is? e.g., using a pseudocode." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "As far as I know, this is the first work that aims to formally define and address “unremovable watermarks” that are planted in open source models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The problem of watermarking the output of LLMs has been around for some time. 
Previous work has focused on changing the output distribution of the LLMs, sometimes in an “undetectable/distortion-free” way. But this work starts by making the following point:\nIf one has access to the parameters of an LLM, they can run it to generate output that is not watermarked. \n\nThe main problem of this paper is to watermark the parameters of the model itself, in a way that it both gets reflected in the output + even if one gets their hand on the model parameters, they cannot modify it in a way that the watermark is removed from subsequent outputs.\n\nOf course one shall consider attackers who can train the model from scratch. So the paper assumes that doing so is hard (e.g., by making the access to the data generation process costly).\n\nThe contribution of the paper is the following. They propose a scheme that works by adding Gaussian noise to the last layer of the model before publishing it. Also knowing the original model and the noise, they show how to verify whether the generated output comes from their model or not.\n\nThe paper then makes multiple assumptions to prove that their scheme is secure. The final watermark suffers from not having robustness or undetectability. It is not clear if such weaknesses are inherent or not." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper does not fully address several, by now well-recognized aspects, of the watermark:\n1. Robustness of the watermarks. E.g., what if one changes even two characters of the produced output? Or that it deletes parts of the output. Here the paper claims it has done experiments but i could not figure out what exact perturbation channels are studied.\n2. It seems that the output of the watermarked model here is *not* indistinguishable -- sometimes called undetectable or distortion free -- (in comparison with non-watermarked model's output). 
This is the ultimate way of arguing that the model’s utility does not degrade after adding the watermark and the paper does not discuss it clearly. Note that here, I am not talking about \"removability\". This is about the item above (robustness) but rather if the output of the watermarked model differs (in a computationally noticeable way) from the output of non-watermarked model.\n\nTo partially address the above issues, the paper should first define clearly what class of perturbation channels they study (and why they are interesting) for the robustness property evaluations (which are seemingly done under the name of Detectability) and for the item 2 above (undetectability of the output -- which is different from the Detectability study) they should design experiments specifically for this goal or make a theoretical assertion.\n\nAlso, the proofs are based on multiple assumptions, which make the final conclusion far from ideal. (See my question below)\n\nAlso, what happens to the watermarks if the model is fine tuned? (note that black-box methods still work, if the model is fine tuned). This issue should be addressed using experiments. They could be simple experiments that simply test the detectability and utility of the outputs after a fine tuning for specific goals (also see my question below).\n\nThe writing also is not great and lacks discussions and justifications with regard to the issue mentioned above (e.g., of the assumptions). \nOther than the issues above, the intuition behind why this non-black-box approach is working could be much better.\n\nOther minor comments on writing:\n\nDef 2 seems to be more like a “similarity” measure, because the loss in quality seems to be different. For example, two models could look very different but have the same quality of responses.\n\nDef 4: seems to mix the input space of Q and \\ell, right?" 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "We construct a watermark for open-source language models that is provably unremovable, even by an adversary with access to the model's weights." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024unremovable,\ntitle={Unremovable Watermarks for Open-Source Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0SpkBUPjL3},\nnote={under review}\n}" }, "abstract": { "value": "The recent explosion of high-quality language models has necessitated new methods for identifying AI-generated text. Watermarking is a leading solution and could prove to be an essential tool in the age of generative AI. Existing approaches embed watermarks at inference and crucially rely on the large language model (LLM) specification and parameters being secret, which makes them inapplicable to the open-source setting. In this work, we introduce the first watermarking scheme for open-source LLMs. Our scheme works by modifying the parameters of the model, but the watermark can be detected from just the outputs of the model. Perhaps surprisingly, we prove that our watermarks are $\\textit{unremovable}$ under certain assumptions about the adversary's knowledge. To demonstrate the behavior of our construction under concrete parameter instantiations, we present experimental results with OPT-6.7B and OPT-1.3B. We demonstrate robustness to both token substitution and perturbation of the model parameters. We find that the stronger of these attacks, the model-perturbation attack, requires deteriorating the quality score to 0 out of 100 in order to bring the detection rate down to 50%." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "watermark", "large language model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1c718e63c1d4bb45e5c294633410633ec7605b3e.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Unremovable Watermarks for Open-Source Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0T49QbSOho
Regret-Optimal List Replicable Bandit Learning: Matching Upper and Lower Bounds
main
Active
Replicability;Regret Bound;Bandit
learning theory
5;6;6;8
3;3;2;4
3;3;3;4
3;3;2;3
3;3;3;4
6.25
3
3.25
2.75
3.25
0.648886
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The problem setting proposed is both novel and intriguing, characterized by a rigorously defined concept of bandit replicability in Definition 2.2.\n2. The theoretical analysis provided is exhaustive, introducing three distinct algorithms tailored to various parameters of replicability." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies list replicability in multi-armed bandits (MAB), defining an algorithm as list replicable if it limits the distinct arm sequences (traces) across independent executions with high probability. Further, this paper proposes three algorithms with different parameters of list replicability. Finally, this paper investigates a lower bound of bandits with list replicability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Algorithms 1 and 2 exhibit considerable similarities. Could there be a method to consolidate these two algorithms into a unified framework?\n\n2. 
In Theorem 6.1, the designation \"lower bound\" appears misapplied as it does not seem to correspond to the lower bounds of any algorithms discussed previously. Notably, in Theorem 6.1 we have $l \\approx k$, whereas in prior algorithms $l \\gg k$ in most cases. In my humble opinion, a valid lower bound should be able to explain whether the proposed algorithms can be further optimized in general.\nFurthermore, why the authors said \"We show that result (3) is nearly tight for B=2\" in the abstract. What's the hidden constant behind $\\Omega(B) $ in (3). Do you mean the regret of (3) is $O(T)$ for $B=2$?\n\n3. Would it be more accurate to describe what is currently referred to as \"lower bounds\" in Theorem 6.1 as \"impossibility results\"? I think Theorem 6.1 is quite trivial because any pair of traces should share more than two arms if the total number of traces is less than $K$.\n\n4. The absence of experimental validation in this paper is notable. Including even preliminary numerical simulations or toy experiments could significantly enhance the validity and impact of the proposed algorithms." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Line 18, $\\widetilde O \\sqrt{kT}$ missing parentheses.\n2. The notion of $O(\\cdot)$ and $\\Omega(\\cdot)$ was a little abused. 
The paper contains regret bound like $\\widetilde O (k^{\\frac32} T^{\\frac12 + 2^{-\\Omega(B)}})$. Here, it's inappropriate to use $\\Omega(\\cdot)$ in $\\widetilde O(T^{2^{-\\Omega(B)}})$, because the constant before $B$ cannot be ignored, e.g., $T^{2^{-B}}$ and $T^{2^{-2B}}$ have very different order." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper proposes a definition of reproducibility in bandits problems.\n2. The paper proves tight trade-off between replicability and regret dependency on $T$. \n3. The proof to the lower bound is quite insightful." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies list replicability in multi-armed bandits and linear bandits. It comes up with the notion of $(\\ell, \\delta)$-list replicability, and proved various trade-off between replicability and regret dependency on number of arms and on time horizon. Furthermore, the paper extends the results to linear bandits setting." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The algorithms are generally based on successive elimination, so it contains less insight on more widely used bandits algorithms like UCB.\n2. The proofs to the upper bounds are quite simple and lack enough novelty given their similarity to successive elimination." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- While it seems that replicability papers often omit experiments, bandit experiments are generally straightforward to conduct. Did the authors consider demonstrating some experimental results?\n- Most of the algorithms appear to be adaptations of standard elimination-based bandit algorithms for both k-armed and linear bandit problems. It would be valuable if the authors could reference each classical elimination algorithm and include a side-by-side comparison showing what aspects of these algorithms break replicability and how the new modifications enable it.\n- Given that the study addresses regret minimization—typically dominated by UCB-type algorithms for stronger instance guarantees—the authors’ choice of elimination-based algorithms is interesting. Could you clarify the rationale behind this choice?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well-written and structured, with a clear motivation. Though short, it presents a comprehensive set of results for both k-armed and linear bandits, though the linear bandit results appear to be preliminary." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces replicability to the multi-armed bandit area through the concept of list replicability and proposes algorithms for both k-armed and linear bandits. Notably, for k-armed bandits, the authors provide a lower bound demonstrating that one proposed algorithm is nearly optimal." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- It would be helpful to clarify which variables the hidden logarithmic factors depend on, and whether these factors are consistent throughout the paper.\n- No experiments are presented." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. Could you compare $\\rho$-replicability and list replicability with respect to their potential practical applications, such as in clinical trials?\n2. Why is $C$ referred to as the number of shifts? Do you mean the number of possible shift $r$?\n3. Minor typos: Line 207: Theorem 2.1 -> Assumption 2.1; Line 210: lemma -> lemmas; Line 346: the of -> the number of.\n4. Thompson sampling and UCB are two well-established algorithms in the bandit literature. Thompson sampling is randomized, making it tricky to provide strong list replicability guarantees. Could you discuss the potential challenges in adapting UCB? My intuition is that UCB might achieve good list replicability with appropriate early-stage modifications." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. 
Although the paper is highly theoretical, it is well-presented and clearly conveys the key ideas behind the algorithm designs and proofs.\n\n2. Three algorithms with varying levels of guarantees are introduced, each with its own significance. Notably, the first algorithm achieves near-optimal cumulative regret, and the total number of possible traces is independent of T. The last algorithm is based on a subroutine from Dixon et al. (2023) and is nearly optimal, given the lower bound in Section 6.\n\n3. The theoretical contributions are nontrivial, and the analysis of the phase-elimination algorithm is novel, which should be of interest to the bandit community. It is also interesting that the lower bound is proven using the Sperner/KKM lemma, a combinatorial result in coloring." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces the concept of list replicability to the multi-armed bandit model, where the sequence of arm pulls must lie in a small finite list with high probability. The authors design and analyze three algorithms, each providing different levels of guarantees on list replicability and high-probability regret. Additionally, a nearly matching lower bound is proved for any algorithm with sub-linear regret. The paper also extends the study to linear bandits." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main criticism of the paper might lie in its motivation. In the introduction, it is suggested that list replicability might be beneficial for safety-critical applications, as one could be prepared for the action sequence being played. However, although the proposed algorithms can ensure a small number of traces with high probability, these possible traces cannot be known without exact knowledge of the problem instance. Therefore, outside of the theoretical domain, the practical application of list replicability seems limited." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024regretoptimal,\ntitle={Regret-Optimal List Replicable Bandit Learning: Matching Upper and Lower Bounds},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0T49QbSOho},\nnote={under review}\n}" }, "abstract": { "value": "This paper investigates *list replicability* [Dixon et al., 2023] in the context of multi-armed (also linear) bandits (MAB). We define an algorithm $A$ for MAB to be $(\\ell,\\delta)$-list replicable if with probability at least $1-\\delta$, $A$ has at most $\\ell$ traces in independent executions even with different random bits, where a trace means sequence of arms played during an execution. For $k$-armed bandits, although the total number of traces can be $\\Omega(k^T)$ for a time horizon $T$, we present several surprising upper bounds that either independent of or logarithmic of $T$: (1) a $(2^{k},\\delta)$-list replicable algorithm with near-optimal regret, $\\widetilde{O}{\\sqrt{kT}}$, (2) a $(O(k/\\delta),\\delta)$-list replicable algorithm with regret $\\widetilde{O}\\left(\\frac{k}{\\delta}\\sqrt{kT}\\right)$, (3) a $((k+1)^{B-1}, \\delta)$-list replicable algorithm with regret $\\widetilde{O}(k^{\\frac{3}{2}}T^{{\\frac{1}{2}}+2^{-\\Omega(B)}})$ for any integer $B>1$. We show that result (3) is nearly tight by establishing there are no $(k-1,\\delta)$-list replicable algorithm with $o(T)$-regret, almost exactly matching $k$-list replicable upper bound for $B=2$. We further show that for linear bandits with $d$-dimensional features, there is a $\\widetilde{O}(d^2T^{1/2+2^{-\\Omega(B)}})$-regret algorithm with $((2d+1)^{B-1},\\delta)$-list replicability, for $B>1$, even when the number of possible arms can be infinite." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Replicability", "Regret Bound", "Bandit" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/44710b3bc003380b20596999b26ecd368edea3b9.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Regret-Optimal List Replicable Bandit Learning: Matching Upper and Lower Bounds" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0T8vCKa7yu
LLM Compression with Convex Optimization—Part 1: Weight Quantization
main
Active
weight quantization;model compression;large language models
optimization
3;3;3;3
4;5;4;4
2;3;2;2
2;2;1;2
2;3;3;2
3
4.25
2.25
1.75
2.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "Some reviewers seem to be confused about how GPTQ and AWQ (Frantar et al., 2022; Lin et al., 2024) accelerate LLM inference on GPUs. These methods do not perform arithmetic directly using 3 or 4 bit weights (not possible because of float16 activations). Instead, these methods de-quantize the 3- or 4-bit weights back to float16 dynamically/as needed, multiplying them by float16 activations. This leads to acceleration because weights can travel faster through the memory hierarchy. Our proposed method accelerates inference in the same way, by dequantizing groups of weights (some in 0 bits, some in 1 bits ... some in 8 bits, etc) to float16 as needed. Correctly understanding this is key to clearing any misconceptions about mixed-precision quantization not leading to acceleration on GPUs.\n\nExcerpt from GPTQ (Frantar et al., 2022):\nCompute is dominated by matrix-vector products. Unlike matrix-matrix products, these are primarily limited by memory bandwidth. We address this problem by developing a quantized-matrix full-precision-vector product kernel which performs a matrix vector product by dynamically dequantizing weights when needed ...\n\nExcerpt from AWQ (Lin et al., 2024): \nGeneration stage is memory-bound ... the only way to improve the peak performance is to reduce the total amount of memory traffic. AWQ reduces the weight memory by four times ... we need to dequantize integers to FP16 before performing matrix computation. We avoid writing dequantized weights into DRAM ...\n\nLn 30 of our original manuscript already summarizes the above excerpts." 
}, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": { "value": "Clearing reviewers' misconceptions about GPTQ and AWQ." }, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "Apologies if my tone seems direct; it's meant purely for clarity.\n__________\n*1. \"Mixed-precision quantization is a well-researched field ... highlight how its method differs from existing techniques ...\"*\n\nWhile mixed-precision quantization has previously been explored (Wang et al., 2019; Chen et al., 2021; Lee et al., 2024; Dettmers et al., 2023), these methods assign different bit depths from a limited set of bit-depth options (e.g., 4 or 16 bits) or only across different layers. This is due to combinatorial nature of mixed bit-depth assignment. This limits the attainable quantized model accuracy especially for LLMs with hundreds of billions of parameters.\n\nIn contrast, we formulate bit-depth assignment as a convex optimization problem. 
This allows us to overcome the combinatorial challenges faced by prior methods and to achieve true mixed-precision quantization at an arbitrary level of granularity (per-channel or per-layer) with a wider range of bit depth options, ({0, 1, 2, 3, 4, 5, 6, 7, 8}). This leads to optimal model quantization tailored specifically to the demands of each channel or layer. \n\n(These paragraphs now in the revised manuscript.)\n__________\n*\"... why it chose to compare solely with LLM quantization methods ...*\"\n\nOur paper is on LLM quantization. As such, we benchmark against state-of-the-art LLM quantization techniques, including mixed-precision and fixed-precision methods. These methods are also compared against in other LLM quantization works, ensuring a robust and contextually relevant evaluation of our method.\n__________\n*\"2. ... Group quantization is not a new concept ...*\"\n\nWe meant to say that per-channel mixed precision works well with the grouping mechanism; see Table 2(c). The revised manuscript now correctly attributes the grouping mechanism to GPTQ and AWQ (Frantar et al., 2022; Lin et al., 2024).\n__________\n*\"3.. ... The writing needs improvement. The definition of \"part-1\" in the title is unclear ...\"*\n\nThank you, we will revise/shorten the title per your suggestion.\n__________\n*\"4. ... The convex optimization formulation proposed seems flawed. For instance, in equation three, f(X) is not convex ...\"*\n\n__Not true.__ We never say or imply the network model $f$ is convex. It is the optimization objective $d$ that is convex with respect to continuous variables $B_1,\\dots,B_N$. Objective $d$ is convex by construction since $f$ is linear(-ized) as in Hassibi and Stork (1992) and the MSE loss is convex. See eq. (5) and Appendix A for details. \n__________\n*\"5. ... The utility of mixed precision within a matrix is unclear ... 
Most mixed-precision quantizations occur between layers, not within a matrix.*\"\n\nOur work, as well as GPTQ, AWQ and OWQ (Frantar et al., 2022; Lin et al., 2024; Lee et al., 2024) are examples of weight-only quantization methods. None of these methods / their kernels perform arithmetic in 3 or 4 bits. At inference time, weights are de-quantized back into float16 so that they can multiply with float16 activations. This still amounts to acceleration because quantized weights can essentially travel faster through the memory hierarchy (registers–L1 cache–L2 cache–global). If weights must be dequantized, there is no need to insist upon mixed-precision quantization only across layers; see (Dettmers et al., 2022; Lee at al., 2024) for other examples of channel-wise mixed precision quantization. Our response to Pd64 clarifies this further." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "Apologies in advance if my tone seems direct; it's meant purely for clarity.\n_________________\n*1. \"The main concern ... 
the authors permit each weight to have a different bit depth assignment, ...\"*\n\n__Not true.__ We use a grouping/clustering idea similar to GPTQ and AWQ (Frantar et al., 2022; Lin et al., 2024). Ln 356 says that we assign a single bit depth to a group of 512 weights (OPT) or 256 weights (Llama-2). Table 2(c) also shows that a group size of 512 performs better than 64, 128, or 256 for our quantized OPT models. For added clarity, we add to the caption of Figure 4: ... Clustering with a cluster size of 2 illustrated only for clarity.\n_________________\n*2. \"... AWQ employs dedicated kernels and uniformly quantizes all weights to 4 bits, aligning with the availability of a 4-bit engine. However, the manuscript lacks discussion on hardware acceleration or performance degradation.\"*\n\n__Not true.__ GPTQ and AWQ (Frantar et al., 2022; Lin et al., 2024) and their engines do not perform arithmetic in 4 or 3 bits. These methods dequantize 4- or 3-bit weights back into float16 on the fly so that weight-activation multiplication can be performed in float16. We dequantize mixed precision weights (some of which are 3 bits, some 4 bits, some 8 bits, etc.) back into float16 in exactly the same way. \n__________________\n*3. \"... clarify how different bit-depth assignments would affect matrix multiplication kernels as batch size increases, as this could have a significant impact on performance.\"*\n\nIncreasing the batch size does not change inference, as weights are always dequantized back to float16 as a first step and activations are always kept in float16. Indeed, this is the approach also used by (Frantar et al., 2022; Lin et al., 2024; Lee et al., 2024).\n__________________\n*4. \"the lack of considerations for hardware acceleration\"*\n\nFirst, GPTQ, AWQ and OWQ (Frantar et al., 2022; Lin et al., 2024; Lee et al., 2024) quantize weights only. At inference time, weights are de-quantized back into float16 (so that they can be used on float16 activations). 
This still amounts to acceleration because quantized weights can essentially travel faster through the memory hierarchy (registers–L1 cache–L2 cache–global). Our method leads to hardware acceleration in exactly the same manner.\n__________________\n*5. \"Use of configurations, such as varying bit depths, that seem impractical and create unfair comparisons with prior work ... the need for a reevaluation of experimental results, given that the proposed quantization schemes operate under fundamentally different assumptions\"*\n\n__Not true.__ The methods you mention (GPTQ, AWQ and OWQ) do __not__ perform arithmetic directly in 3 or 4 bit weights and activations. They assume weights will be de-quantized back to float16 as needed for weight–activation multiplications in float16. This is the same assumption that our method is based on. Our method has the additional flexibility of assigning different bit depths (8, 7, 6, 5, 4, 3, 2, 1, or even 0 bits) to different groups of 512 weights to maximize the accuracy of the quantized model while maintaining the same de-quantization complexity (since everything is scalar quantized)." 
}, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "Apologies if my tone seems direct; it's meant purely for clarity.\n_________________________\n*\"Lacks comparison with existing LLM quantization methods such as FlexRound [1] and QuIP [2].\"*\n\nFlexRound [1] does not have a publicly available code and its results are not reproducible. We now include QuIP [2] results in Tables 2 and 4 of the revised manuscript. Note that the official QuIP [2] code does not work correctly on Llama-2 models (a known issue reported on the QuIP GitHub), producing perplexities higher than RTN. Out of respect for the authors of QuIP, we do not report these QuIP results on Llama-2 models.\n_________________________\n*\"Primarily evaluates LLM performance using perplexity, with insufficient comparison across other metrics like MMLU and AlpacaEval.\"*\n\nYes. However, we have some concerns with MMLU and AlpacaEval, as they are not very widely used, not even in the FlexRound [1] and QuIP [2] works that you refer to. 
So, we include one more perplexity metric (on C4) as well as the following new QA metrics: Arc (Challenge), Arc (Easy), HellaSwag, PIQA, and Winogrande. These are popularly used in other model compression papers. These are now shown in the Tables 4 and 5 (c) of the revised manuscript.\n__________________________\n*\"Insufficient discussion ... accelerated on existing hardware such as GPUs.\"*\n\nOur work accelerates inference in the same manner as GPTQ, AWQ, and OWQ (Frantar et al., 2022; Lin et al., 2024), which are weight-only quantization methods. These methods / their kernels do not perform arithmetic in 3 or 4 bits. At inference time, weights are de-quantized dynamically back into float16 so that they can multiply with float16 activations. This still leads to acceleration because quantized weights can travel faster through the memory hierarchy (registers–L1 cache–L2 cache–global). \n\n(This above text is now included in the revised manuscript. Please also see our response to Pd64.)\n__________________________\n*\"Tables 1 and 2 lack information on the average bit depth achieved by CVXQ ... may not exactly match the user-specific quantization bit depth ...\"*\n\nOur convergence tolerance is $10^{-6}$ bits (ln 233). The actual average bit depths achieved by CVXQ were 3.999999–4.000001 (for 4-bit models) and 2.999999–3.000001 (for 3 bit models).\n__________________________\n*\"What do the terms \"row\" and \"column\" mean in the context of row and column partitioning in Figure 3?\"*\n\nRow (resp. column) refers to the average bit depth savings achieved when assigning separate bit depths to rows (resp. columns) of each weight matrix. 
This is also stated more clearly in the revised manuscript.\n__________________________\n*\"What units were used for clustering in Tables 1 and 2?\"*\n\nLn 356 states that cluster sizes of 512 (OPT) and 256 (Llama-2) are used.\n__________________________\n*\"The Massive Activation paper[3] demonstrated significant performance degradation when clipping massive activations from activation distributions ... Can the proposed CVXQ method be extended to apply to activation distribution?*\"\n\nCVXQ already considers activation distribution. CVXQ uses the mean square magnitude of the weight gradient to inform bit depth assignment. By expressing weight gradient as the outer product of the input to the weights and the gradient of weight's output, we see that input magnitudes are indeed considered in the form of (mean square) gradient magnitude.\n___________________________\n*\"The quantization process described in the paper suggests that the time required for quantization might exponentially increase with the number of iterations, as shown in Figure 5.*\"\n\n__Not true.__ The number of iterations is kept the same across model sizes and the time each iteration takes is roughly linear (slightly super-linear) in the number of model parameters. CVXQ takes 47m to quantize the Llama-2-7B model, and 12h for the Llama-2-70B one. The revised manuscript now states these timings.\n___________________________\n*\"How does the size of the calibration set affect the performance of CVXQ?*\"\n\nFor OPT-1.3B and OPT-13B models, we experimented using 1024 calibration samples instead of 128 and the resulting perplexities on C4 were within ±0.01 of those based on 128 samples. This is consistent with the variance observed from choosing a different set of 128 calibration examples. 
\n\n(The above text is now included in the revised manuscript.)" }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* What do the terms \"row\" and \"column\" mean in the context of row and column partitioning in Figure 3?\n* What units were used for clustering in Tables 1 and 2?\n* The Massive Activation paper[3] demonstrated significant performance degradation when clipping massive activations from activation distributions. Papers like LLM.int8, SmoothQuant, and AWQ have shown the importance of considering activation distributions to mitigate the impact of outliers. 
Can the proposed CVXQ method be extended to apply to activation distribution?\n* How does the size of the calibration set affect the performance of CVXQ?\n\n[3] Massive Activations in Large Language Models, https://arxiv.org/abs/2402.17762" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* Demonstrates that companded quantization can reduce the mean square error of weights before and after quantization more effectively than uniform quantization.\n* Introduces a novel approach to weight-only quantization by employing various partitioning methods, specifically row and column clustering.\n* Proposes a method to minimize the degradation in performance due to quantization within a constrained average bit depth by finding the optimal bit assignment combination. This is achieved by defining the quantization objective function in a Lagrangian form and solving it using convex optimization.\n* Shows that the proposed partitioning methods can result in greater bit depth savings compared to non-partitioned methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a method called CVXQ for mixed precision weight-only quantization of large language models (LLMs) using convex optimization techniques. CVXQ allows for user-specific quantization bit depths by defining the average bit depth and then seeking to minimize quantization error within this constraint. The method introduces row-wise and column-wise clustering to achieve this goal, where each cluster can be assigned different bit depths. To assign these bit depths, the problem is formulated in a Lagrangian form and solved using convex optimization. 
The effectiveness of CVXQ is demonstrated by achieving superior performance on the WikiText perplexity (PPL) metric compared to methods such as GPTQ, AWQ, and OWQ across various sizes of OPT models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Lacks comparison with existing LLM quantization methods such as FlexRound[1] and QuIP[2].\n* Primarily evaluates LLM performance using perplexity, with insufficient comparison across other metrics like MMLU and AlpacaEval.\n* Insufficient discussion and comparative analysis on how the proposed CVXQ method can be accelerated on existing hardware such as GPUs. One of the key goals of compression methods like quantization is to achieve actual acceleration. Although the paper mentions that this will be addressed in Part 2, it is crucial to include a discussion on how to accelerate the proposed quantization format.\n* Tables 1 and 2 lack information on the average bit depth achieved by CVXQ. 
Since the proposed method assigns bit depths through a convex optimization process, it may not exactly match the user-specific quantization bit depth, leading to potentially different compression rates in practice.\n* The quantization process described in the paper suggests that the time required for quantization might exponentially increase with the number of iterations, as shown in Figure 5.\n\n[1] FlexRound: Learnable Rounding by Element-wise Division for Post-Training Quantization, https://openreview.net/forum?id=-tYCaP0phY_\n\n[2] QuIP: 2-Bit Quantization of Large Language Models With Guarantees, https://arxiv.org/abs/2307.13304" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see weaknesses" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The authors derive several mathematical formulations for the quantization scheme, making a few assumptions about weight distributions, such as Normal or Laplace. They use figures to illustrate whether the statistical data from the OPT models align with these assumed distributions." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a quantization framework called CVXQ, which first optimizes bit depth assignment and then refines step sizes and biases using convex optimization techniques. To further improve the quantization scheme, the framework incorporates matrix partitioning, dividing the matrix into a set of row or column sub-matrices, each with its own bit depth and step size. The experiments are conducted on Meta's LLaMA and OPT models, using PPL and GSM8K as evaluation metrics." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main concern with this manuscript is that it does not address practical hardware constraints. Specifically, the authors permit each weight to have a different bit depth assignment, a strategy that is rarely seen in existing literature. For instance, AWQ employs dedicated kernels and uniformly quantizes all weights to 4 bits, aligning with the availability of a 4-bit engine. However, the manuscript lacks discussion on hardware acceleration or performance degradation resulting from the proposed quantization scheme.\n\nBy neglecting hardware-related considerations, the comparisons with previous works may appear unfair. Well-established quantization methods like OWQ, AWQ, or RTN explicitly demonstrate how their quantized models achieve latency improvements on common GPUs. In contrast, this manuscript explores more complex ideas, such as pruning and matrix partitioning, without addressing the impact on parallelism or the hardware requirements these approaches would entail.\n\nIt is crucial to describe the limitations of the quantization scheme for practical hardware implementation. 
Without doing so, methods that account for hardware acceleration might seem inadequate, despite the practical challenges associated with mixed precision or varying bit depth assignments.\n\nFor example, the authors should clarify how different bit-depth assignments would affect matrix multiplication kernels as batch size increases, as this could have a significant impact on performance.\n\nIn summary, the major concerns are: 1) the lack of considerations for hardware acceleration; 2) the use of configurations, such as varying bit depths, that seem impractical and create unfair comparisons with prior work; and 3) the need for a reevaluation of experimental results, given that the proposed quantization schemes operate under fundamentally different assumptions." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The paper claims that the proposed algorithm completes the quantization quickly, yet a lack of experimental or theoretical analysis supports this assertion. Could the authors provide more evidence or discussion on this aspect?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The proposed techniques are well-grounded in theory, and each aspect of the framework appears logically sound and justifiable." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a framework for efficient handling of large language models (LLMs) by (1) determining mixed-precision quantization at layer or group levels to meet a target bitwidth and (2) proposing a novel method for deciding quantization step sizes." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper introduces a mixed-precision approach, but comparisons are primarily made with uniform-precision quantization methods. A broader survey and comparison with other mixed-precision methods, addressing their strengths and weaknesses, would provide a stronger context for evaluating the proposed method.\n\nAn ablation study is needed. According to Z-Fold [1], step size determination methods like Min-Max, MMSE, and Hessian-based approaches are often used in quantization. A comparative analysis showing the effectiveness of the proposed method against these would strengthen the evaluation.\n\nSeparating the processes of bit-precision allocation and the quantization algorithm applied could provide clearer insights into each aspect of the method.\n\nThe proposed methodology is reasonable but lacks comparative analysis, which would underscore its relative advantages.\n\n\nTesting on a wider range of models and benchmarks would further validate the generalizability of the proposed approach.\n\n\n\n\n[1] Jeon et al. \"A frustratingly easy post-training quantization scheme for llms.\" Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 2023." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper introduces a comprehensive quantization method for applying different bit allocation to groups within a large language model (LLM) matrix." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper tackles the critical issue of large language model (LLM) compression, proposing a novel quantization technique, CVXQ, viewed from a convex optimization perspective. CVXQ, scalable to models with hundreds of billions of weight parameters, allows users to compress models to any specified size after training" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper's contribution isn't distinct. 
Although it proposes treating dynamic bit allocation as a convex optimization problem, this approach faces several issues:\n - Mixed-precision quantization is a well-researched field; the paper should highlight how its method differs from existing techniques and why it chose to compare solely with LLM quantization methods.\n - Group quantization is not a new concept but a long-standing basic strategy in the quantization field.\n - The convex optimization formulation proposed seems flawed. For instance, in equation three, f(X) is not convex, which questions the validity of the entire problem.\n- The writing needs improvement. The definition of \"part-1\" in the title is unclear, and many descriptions in the text are ambiguous.\n- The utility of mixed precision within a matrix is unclear. This approach would require complex, specific hardware design, limiting its broad application. Most mixed-precision quantizations occur between layers, not within a matrix." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "For large language model compression, weight quantization using convex optimization leads to superior compressed model performance" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024llm,\ntitle={{LLM} Compression with Convex Optimization{\\textemdash}Part 1: Weight Quantization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0T8vCKa7yu},\nnote={under review}\n}" }, "abstract": { "value": "In recent years, compression of large language models (LLMs) has emerged as an important problem to enable language model deployment on resource-constrained devices, reduce computational costs, and mitigate the environmental footprint of large-scale AI infrastructure. 
In this paper, we lay down the foundation for LLM quantization from a convex optimization perspective and propose a quantization technique that builds on this foundation for optimum quantization outcomes. Our quantization framework, CVXQ, scales to models containing hundreds of billions of weight parameters and provides users with the flexibility to compress models to any specified model size, post-training. A reference implementation of CVXQ can be obtained from." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "weight quantization", "model compression", "large language models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/d1858bbeb61a499b90b59ab49374b371ac2d4531.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/78fc7cfc8191c6ce2f5a0abf568ce25668ee790c.zip" }, "title": { "value": "LLM Compression with Convex Optimization—Part 1: Weight Quantization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0TSAIUCwpp
Diffusion-based Extreme Image Compression with Compressed Feature Initialization
main
Active
extreme image compression;diffusion models;compressed feature initialization;residual diffusion
generative models
3;3;5;6
4;4;4;4
2;2;2;3
2;2;2;3
2;3;2;3
4.25
4
2.25
2.25
2.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weakness." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tThe paper is well-written and easy to follow, and the experiments are detailed and comprehensive.\n2.\tThis paper reduces computational complexity by reducing the denoising step, which is valuable for resource-constrained environments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces RDEIC, a novel diffusion model for extreme image compression that accelerates the denoising process through compression feature initialization. It draws on techniques from several papers, e.g., the codec framework scheme in GLC[1], and the control net in deffeic[2].The results provide evidence that the proposed scheme achieves SOTA performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThe paper has limited innovation. Its pipeline looks like a simple combination of GLC[1] and deffeic[2], utilizing the codec framework of GLC[1] and the generative model of deffeic[2]. 
However, the paper does not compare performance with GLC[1].\n2.\tThis paper adopts a better performance generative model RDD instead of stable diffusion, and with the stronger generative ability of RDD, better performance is obtained. So if DiffEIC-50 also adopts RRD, will it achieve better performance?\n3.\tThe conclusions of some visualization experiments are not rigorous enough. For example, in Fig. 1, despite the obvious subjective quality improvement of RDEIC, its bit rate is 7.5% higher than deffeic[2]. A similar problem can be observed in Figure 5.\n4.\tSome analysis needs to be included to show why RDEIC is worse than MS-ILLM on the NIQE metric.\n\n[1] Jia Z, Li J, Li B, et al. Generative Latent Coding for Ultra-Low Bitrate Image Compression. CVPR 2024.\n\n[2] Zhiyuan Li, Yanhui Zhou, Hao Wei, Chenyang Ge, and Jingwen Jiang. Towards extreme imagecompression with latent feature guidance and diffusion prior. IEEE Transactions on Circuits and Systems for Video Technology, 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Figure 4 shows that the authors' method does not achieve the best results on metrics such as PSNR, MS-SSIM, and SSIM, and there is a significant gap compared to other methods. Noting that PSNR MS-SSIM, and SSIM are metrics used to evaluate fidelity. This is inconsistent with the authors' motivation. 
In the abstract, the authors mention that the proposed method aims to address the limitations of fidelity and efficiency.\n\nThe authors mention in their experiments that they trained five models, each corresponding to different λ_r values. However, in the comparative experiments (e,g, Tab.1, Tab. 2, Tab. 3, Fig. 4, Fig.5, Fig. 6, Fig.7, etc.), the authors do not specify which model's results were used. In addition, the author did not mention the guidance scale values used for these experimental results.\n\nIn Tab. 3, the author uses 2/5 in the DS column, so it is unclear whether the performance in the table refers to the 2-step model or the 5-step model. In addition, just using distortion of BD-rate or perception of BD-rate is not clear. The distortion includes PSNR, and SSIM, etc. and perception includes DISTS, FID, and LPIPS, etc. It is not clear which metrics distortion and perception represent respectively. The author should provide detailed results for metrics such as PSNR, SSIM, and LPIPS. Meanwhile, in the paper comparing the methods (PerCo, MS-ILLM), they did not use the bd-rate metric. Therefore, it is a good choice that the author just employs the values of PSNR, SSIM or LPIPS to demonstrate the performance and not use BD-rate.\n\nIn Tab. 2, the BD-rate of RDEIC with 2 DS is 0, while the BD-rate of RDEIC with 2 DS is also 0. So, which is the anchor in Tab. 2." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper is clear in describing its contributions and methodology. \nThe experimental arrangement is relatively reasonable, and the ablation study can prove the effectiveness of the strategies proposed by the author." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes Relay Residual Diffusion Extreme Image Compression method to achieve fidelity and efficiency. In particular, this paper use latent feature with added noise as the start point and employ residual diffusion to improve the fidelity. And this paper proposes a fixed-step fine-tuning strategy to reduce the number of steps." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The novelty is limited. Firstly, adding noise to the latent features is a common operation, which is used in many papers [1,2]. Secondly, the proposed residual diffusion is similar to ResShift[3]. The author should fully research the diffusion-based methods on low-level vision tasks published in the past two years and analyze the differences between them better. \n[1] SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution CVPR23\n[2] Pixel-Aware Stable Diffusion for Realistic Image Super-Resolution and Personalized Stylization ECCV24\n[3] ResShift: Efficient Diffusion Model for Image Super-resolution by Residual Shifting NIPS23\n\nThe motivation is not clear. In the third paragraph of Sec. 1, the author analysis the limitations of diffusion-based methods. The first limitation is ‘these methods rely on an iterative denoising process to reconstruct raw images from pure noise which is inefficient for inference’. The second limitation is ‘initiating the denoising process from pure noise introduces significant randomness, compromising the fidelity of the reconstructions.’ In addition to adding noise to the latent features, the author also employs a residual diffusion process and employ pre-trained stable diffusion to address these limitations. 
It remains unclear how residual diffusion and pre-trained stable diffusion can resolve the randomness caused by pure noise and improve the fidelity of the reconstructions.\n\nThere are two doubts about controllable detail generation. Firstly, the pre-trained stable diffusion is used to obtain low-frequency information. Since the pre-trained stable diffusion has not seen the inputs in the authors' task, why can it produce the expected results? Secondly, why did the authors choose to use pre-trained stable diffusion instead of directly using CFG?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) Include a detailed analysis of the computational efficiency and resource requirements of RDEIC.\n2) Expand the comparative analysis to include more baseline models and state-of-the-art techniques.\n3) Address the possibility of overfitting by incorporating additional validation datasets or robustness tests."
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1) Introduces an innovative framework (RDEIC) that improves image compression efficiency.\n2) Effectively addresses the fidelity issues present in existing diffusion-based methods.\n3) Provides strong experimental results demonstrating the advantages of the proposed approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a novel approach called Relay Residual Diffusion Extreme Image Compression (RDEIC), which improves upon traditional diffusion-based image compression methods. By leveraging compressed latent features and a residual diffusion process, RDEIC enhances fidelity and efficiency, addressing limitations of iterative denoising processes that typically begin with pure noise. Experimental results indicate significant performance gains in compression rates while maintaining image quality." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) Limited discussion on the computational complexity of the new method.\n2) Insufficient comparison with a broader range of existing compression techniques.\n3) Potential overfitting concerns not addressed within the experimental analysis." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Definition of \"Extremely Low Bitrates\". The standard for \"extremely low bitrates\" lacks a precise definition. Given varying content distributions (scenes) and the amount of high-frequency details, might \"extremely low\" have different thresholds? How would one define this threshold? Could the authors discuss the broader application potential of encoding methods in bandwidth-constrained scenarios? Additionally, does diffusion lose its value in compression at higher and medium bitrates?\n\n2. Codebook Details. The approach involving \"vector-quantized latent image representations\" is intriguing. Could the authors elaborate on the learning and training process of the codebook loss? Specifically, how is the codebook initialized, and what is the interaction between the codebook and $ l_p$?\n\n3. Since the multi-step sampling mechanism in diffusion leads to increased computational complexity in decoding, would placing diffusion in the encoding part or within the hyperprior yield different conclusions regarding complexity?\n\n4. Role of the diffusion mechanism. Is diffusion effective mainly as a post-processing module to enhance perceptual quality, or does it also contribute to compact representation? A deeper analysis of the role of diffusion in improving perceptual quality versus compact representation would be insightful." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Given the effectiveness and complexity of diffusion models, fast diffusion sampling as a practical research approach holds significant value and positively impacts the community.\n\n2. 
The balance between smoothness and sharpness mentioned in the paper provides practical insights into this area. In a given compression state, determining how to map it to the sampling step \n𝑁 can directly affect reconstruction quality. This mapping relationship is crucial to the model's effectiveness and stability, which the authors have explored in detail." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Relay Residual Diffusion Extreme Image Compression (RDEIC), a method for high-quality image compression at extremely low bitrates. RDEIC has three main components: (1) it begins denoising with compressed latent features plus noise instead of pure noise, reducing steps and improving fidelity; (2) it introduces a relay residual diffusion process, iteratively removing noise and residuals between compressed and target features, leveraging a pre-trained stable diffusion model for quality reconstruction; and (3) it applies a fixed-step fine-tuning strategy to minimize discrepancies between training and inference, further enhancing quality. Experimental results show that RDEIC achieves state-of-the-art visual quality, surpasses existing diffusion-based methods in fidelity and efficiency, and provides controllable detail generation to balance smoothness and sharpness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The novelty of this work is relatively modest, though it provides a valuable practical application in image compression. Many recent studies have explored similar approaches, starting the diffusion process from low-quality images rather than pure noise to enhance efficiency and accelerate sampling. Integrating degraded image embeddings into a pre-trained diffusion model as a plug-and-play module is also a relatively well-explored approach in the field of image processing. 
Prior works include:\n - [1] Lin, X., He, J., Chen, Z., Lyu, Z., Dai, B., Yu, F., ... & Dong, C. (2023) in \"DiffBIR: Towards Blind Image Restoration with Generative Diffusion Prior\",\n - [2] Wang, Y., Yu, Y., Yang, W., Guo, L., Chau, L. P., Kot, A. C., & Wen, B. (2023) in \"ExposureDiffusion: Learning to Expose for Low-Light Image Enhancement\" (ICCV),\n - [3] Ma, J., Zhu, Y., You, C., & Wang, B. (2023) in \"Pre-trained Diffusion Models for Plug-and-Play Medical Image Enhancement\" (MICCAI).\n\n2. The Text-Sketch in Figure 1 and Figure 5 shows significant deviations in chroma reconstruction. I am unsure whether this is due to the baseline itself or if there was a mix-up between RGB and BGR channels during the experimental preprocessing stage. Additionally, the brightness of PerCo-20 in Figure 1 appears to be slightly biased compared to the ground truth. It is recommended to carefully examine the methods used for comparison, especially when the baselines are highly novel, and when results show noticeably unusual behavior, to ensure a fairer comparison.\n\n3. Potential issue with variable control in the entropy model. The paper employs unusual entropy models (i.e., VQ-E and VQ-D) without adequate control or detailed explanation. This may lead to comparison results that do not accurately reflect the primary contribution of the proposed approach when contrasted with other algorithms, given that the precision of entropy models directly impacts compression efficiency and reconstruction quality.\n\n4. Ambiguity in baseline selection. In Table 1 and Line 354, using “Ours” as the baseline results in a row of zeros, which may lead to ambiguity and does not align with traditional statistical practices (which typically use a control group as the baseline). It is advisable to clarify the baseline in the caption or table notes. 
Additionally, selecting a well-recognized baseline (e.g., JPEG, BPG, or a state-of-the-art compression method) for BD-rate comparison would provide a more straightforward understanding of the relative performance of each method.\n\n5. Scoring issue with implementation versions. In Lines 442-443, the authors mention two implementation versions, yet both report a BD-rate of 0, which may cause confusion. It is recommended to provide a detailed explanation of the different implementations and clarify the reason for the BD-rate of 0 in each case.\n\n6. Suggestions for improving formula clarity: \n - Clarity in the derivation from Eq.2 to Eq.4. The derivation from Eq.2 and Eq.3 to Eq.4 is crucial for the model’s structure but is not immediately clear. This derivation could directly impact the model's efficiency and accuracy. It is recommended to provide a more detailed explanation of these key steps in the main text to enhance understanding.\n \n - Ambiguity in the Definition of Eq.11. In traditional diffusion models (e.g., DDPM and Stable Diffusion), the noise estimator typically predicts total noise rather than noise at specific frequency bands. Interpreting $ \\epsilon_{sd}(z_n, n) $ directly as a \"low-frequency component\" may lack theoretical support, especially without a clear basis for frequency division. The decomposition of predicted noise into low- and high-frequency components might be a heuristic approach, but further justification is needed to establish its rigor.\n \n - Undefined $ l_p $ in Eq.9. The definition of $l_p $ in Eq.9 is unclear. To improve understanding, it would be helpful for the authors to clearly specify the meaning of $ l_p $ and provide relevant context.\n\n7. Minor formatting and typographical suggestions. \n - Line 100: Add commas before and after \"i.e.\" for clarity.\n - Lines 220, 229, and 238: Add commas at the end of formulas to improve readability." 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "This paper presents an efficient diffusion-based extreme image compression model that significantly reduces the number of denoising steps required for reconstruction." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024diffusionbased,\ntitle={Diffusion-based Extreme Image Compression with Compressed Feature Initialization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0TSAIUCwpp},\nnote={under review}\n}" }, "abstract": { "value": "Diffusion-based extreme image compression methods have achieved impressive performance at extremely low bitrates. However, constrained by the iterative denoising process that starts from pure noise, these methods are limited in both fidelity and efficiency. To address these two issues, we present $\\textbf{R}$elay $\\textbf{R}$esidual $\\textbf{D}$iffusion $\\textbf{E}$xtreme $\\textbf{I}$mage $\\textbf{C}$ompression ($\\textbf{RDEIC}$), which leverages compressed feature initialization and residual diffusion. Specifically, we first use the compressed latent features of the image with added noise, instead of pure noise, as the starting point to eliminate the unnecessary initial stages of the denoising process. Second, we design a novel relay residual diffusion that reconstructs the raw image by iteratively removing the added noise and the residual between the compressed and target latent features. Notably, our relay residual diffusion network seamlessly integrates pre-trained stable diffusion to leverage its robust generative capability for high-quality reconstruction. Third, we propose a fixed-step fine-tuning strategy to eliminate the discrepancy between the training and inference phases, further improving the reconstruction quality. 
Extensive experiments demonstrate that the proposed RDEIC achieves state-of-the-art visual quality and outperforms existing diffusion-based extreme image compression methods in both fidelity and efficiency. The source code and pre-trained models will be released." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "extreme image compression", "diffusion models", "compressed feature initialization", "residual diffusion" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/a9f408144be9a39d3e10b155acabb468cc50a923.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Diffusion-based Extreme Image Compression with Compressed Feature Initialization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0Th6bCZwKt
Gaussian Mixture Models Based Augmentation Enhances GNN Generalization
main
Active
Graph Neural Networks;Data Augmentation
learning on graphs and other geometries & topologies
1;5;5;6
5;4;3;3
3;3;2;3
1;2;3;3
3;3;2;3
4.25
3.75
2.75
2.25
2.75
-0.902829
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I do not understand Theorem 1. Please expand explanation." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Data augmentation for graphs is a topic that warrants investigation. There is little work on the subject. \nNumerical evaluations are comprehensive" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper discusses data augmentation for graphs. The concrete proposal is to use a Gaussian mixture model. The justification for this proposed approach is that Gaussian mixture models are universal density estimators." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The proposed data augmentation method is not specific to graphs. It could apply to any data type. No arguments are given as to whether this is suitable way of augmenting graph datasets.\n\nNumerical evaluations are comprehensive but underwhelming. Improvements are marginal relative to training without data augmentation. All but 1 improvement in Tables 1 and 2 are well within one standard deviation and can be explained by random chance." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- In the case of GMM what does the parameter $\\lambda$ represent?\n- Does the GMM-based sample generation method ensure that the augmented samples remain within the distribution of the original data?\n - Can the method be extended to different tasks on graphs, such as node classification and link prediction? \nComment: \nIf the maximum of the expectations $\\mathbb{E}_{\\lambda}[\\mathcal{G}_n^{\\lambda} - \\mathcal{G}_n]$ is non-zero, the Rademacher complexity of $\\ell_{aug}$ may not necessarily be smaller than the Rademacher complexity of $\\ell$." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The proposed approach introduces a novel method for graph data augmentation. \n- The problem studied is relevant and interesting\n- The algorithm's time complexity is analyzed." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper the authors introduce a method to perform data augmentation algorithm for graph datasets. \nThe algorithm leverages Gaussian Mixture Models (GMM) to find the maximum likelihood estimates for each cluster given by the embedding of different classes.
Finally they use the GMM to generate augmented data. Notice that augmented samples are generated directly in the embedding space. The authors provide a bound for the Rademacher complexity for the loss function modified to account for augmented data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The clarity of the paper, particularly in Section 3, could be enhanced to guide the reader more effectively through the discussion.\n- The method requires pre-training of the model and moreover is dependent on the specific model architecture, meaning that the augmented dataset cannot be used by other GNN models.\n- Baselines in Table 1 could be expanded to include additional augmentation techniques, such as edge insertion and feature drop.\n- The metric used to measure the distance between $\\mathcal{G}^{\\lambda}$ and $\\mathcal{G}$ in Theorem 3.1 and subsequent sections is not clearly defined." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.	Based on theorem 3.1, the motivation of the proposed method is to guarantee the alignment of the augmented data and original data, so the authors apply GMM to fit the embeddings of the training data.
But there lacks an explanation of why GMM is applied, it seems that a simple DNN (or other more complex generative data augmentation techniques) can also fit the embeddings. Please explain the advantages of GMM. \n\n2.\tIt seems that only the post-readout function is trained by the combination of augmented data and original data (line 264). Why do not take several more iterations, and update the parameters of message passing layers? Please explain why only generate the embeddings of training data once but not re-generate them after updating the network. \n\n3.\tFigure 2 shows the influence scores of the augmented embeddings on different datasets. But the authors did not analyze why in dataset DD their algorithm perform worse. This is an interesting phenomenon and worth a deeper analysis. \n\n4.\tThe authors claim that GMM-GDA is efficient in the augmentation steps and training steps and provide results in table 6 (line 315). But table 6 did not show the efficiency of GMM-GDA since it still cost much augmentation time or training time. Please explain why such a conclusion can be drawn from table 6. \n\n5.\tIn the result of table 1&2, it seems that GMM-GDA has a better performance in the setting of GIN compared with GCN. This phenomenon worth a deeper analysis. \n\n6.\tThe authors claim that the configuration models (line 470) is part of an ablation study, but it is hard to understand. Please explain more clearly about the conclusion of this experiment." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1.\tThis paper analyzes the problem of GNN generalization capability, which is well-written and clearly clarified. The whole paper is easy to understand.\n\n2.\tThis paper provides theoretical insights before presenting the algorithm. 
\n\n3.\tThe experiments of this paper is closely related to the research goal proposed." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposed GMM-GDA, a graph data augmentation algorithm with better generalization abilities and faster training speed. GMM-GDA is presented based on a theoretical analysis relying a Rademacher complexity, which bounded the generalization error by the difference between the augmented data and original data. Furthermore, this paper verified the effectiveness of GMM-GDA from the perspective of influence functions and detailed experiments show the priority of the proposed algorithm." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThis paper did not explain the necessity of performing data augmentation by GMM. The authors should strengthen the explanation of the relationship between theory and the algorithm. \n\n2.\tSome tables and figures do not seem to support the conclusions in the paper, which needs more explanation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Q1: The number of Gaussian distributions $K$ in GMMs is an important hyperparameter and has impact on the performance of GMM. How do you properly choose this hyperparameter in practices? 
Please describe the tuning process of $K$ or provide hyperparameter sensitivity analysis to show how performance varies with different values of $K$.\n\nQ2: In line 349, the subscripts $i,j$ of the notation $\\\\mathcal{L}^{aug}_{i,j}$ are ambiguous. I think the correct one should be $\\\\mathcal{L}^{aug} _ {n,m}$.\n\nQ3: The proof between line 703 to line 715 seems confusing. The first equality only holds when the right hand side also includes a expectation w.r.t. $\\lambda_{n,m}$. Indeed, I think the proof should be proceeded as\n\\begin{equation}\n\\left\\Vert \\\\frac{1}{N} \\\\sum_{n=1}^N \\\\mathbb{E}_{\\\\lambda \\sim \\\\mathcal{P}} [\\ell(\\\\mathcal{G}_n,\\theta) - \\ell(\\\\mathcal{G}'_n,\\theta) ] \\right\\Vert = \\\\left\\\\Vert \\\\frac{1}{N} \\\\mathbb{E} _ {\\\\lambda \\\\sim \\\\mathcal{P}} [\\ell(\\\\mathcal{G} _ k,\\theta) - \\ell(\\\\mathcal{G}' _ k,\\theta)] \\\\right\\\\Vert \\leq \\\\frac{1}{N} \\\\mathbb{E} _ {\\\\lambda \\\\sim \\\\mathcal{P}} [\\Vert \\ell(\\\\mathcal{G}_k,\\theta) - \\ell(\\\\mathcal{G}'_k,\\theta) \\Vert ] \\leq \\frac{1}{N}.\n\\end{equation}\nThe first equality is obtained by your claim that $\\\\mathcal{G} _ k = \\\\mathcal{G}' _ k$ for $k = 1,\\ldots, N$ and $k \\neq n$. The last inequality is obtained by $\\ell(\\cdot) \\in [0,1]$.\nPlease clarify this issue or correct this part of proof following the above steps.\n\nQ4: In line 754, you claim that $\\\\hat{\\theta} _ {aug}$ is the optimal parameter of the loss $\\frac{1}{N} \\\\sum_{n=1}^N \\\\mathbb{E} _ {\\\\lambda \\sim \\\\mathcal{P}} [\\ell(\\\\mathcal{G}^{\\\\lambda} _ n,\\theta)]$, which is different from the definition of $\\\\hat{\\theta}_{aug}$ in line 195 where $\\\\hat{\\theta} _ {aug} = \\\\mathop{\\rm argmin} _ {\\theta} \\\\frac{1}{NM} \\\\sum _ {n=1}^N \\\\sum _ {m=1}^M \\ell(\\\\mathcal{G} _ n^{\\\\lambda _ {n,m}}, \\theta)$. This could make the inequality $v_3 \\leq 0$ do not hold. 
Please clarify and check the definition of $\\\\hat{\\theta} _ {aug}$. If the above issue do exist, you should consider revising your proof accordingly.\n\nQ5: In line 221-223, you claim that minimizing the term $\\\\mathbb{E} _ {\\\\mathcal{G} \\sim G} \\\\mathbb{E} _ {\\\\lambda \\sim \\\\mathcal{P}} [\\Vert \\\\mathcal{G}^{\\\\lambda} - \\\\mathcal{G} \\Vert ]$ can guarantee with a high probability to decrease both the Rademacher complexity and the generalization risk. And you also show that the Rademacher complexity term $\\\\mathcal{R}(\\ell _ {aug})$ is upper bounded by $\\\\mathop{\\rm max} _ {n=1,\\ldots,N} \\\\mathbb{E} _ {\\\\lambda \\sim \\\\mathcal{P}} [ \\Vert \\\\mathcal{G}^{\\\\lambda} _ n - \\\\mathcal{G} _ n \\Vert ]$, which is a empirical estimation of $\\\\mathbb{E} _ {\\\\mathcal{G} \\sim G} \\\\mathbb{E} _ {\\\\lambda \\sim \\\\mathcal{P}} [\\Vert \\\\mathcal{G}^{\\\\lambda} - \\\\mathcal{G} \\Vert ]$ w.r.t. $\\\\mathcal{G}$. Therefore, minimizing the term $\\\\mathbb{E} _ {\\\\mathcal{G} \\sim G} \\\\mathbb{E} _ {\\\\lambda \\sim \\\\mathcal{P}} [\\Vert \\\\mathcal{G}^{\\\\lambda} - \\\\mathcal{G} \\Vert ]$ may not guarantee to decrease the Rademacher complexity term $\\\\mathcal{R}(\\ell _ {aug})$. Please clarify this issue or modify your claim in line 221-223." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The proposed technique is reasonable and efficient. The experimental results provided in the paper are sufficient to support the effectiveness of this technique. Considering the balance between effectiveness and computational efficiency of this technique, it has a potential application on real-world scenarios. Besides, the paper is well-written and easy to follow." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a novel graph data augmentation based approach to tackle the graph OOD problem. To be specific, they first train a GNN model using the training data. Then, graphs within each class in the training data are fed to the GNN model and the output of the readout layer are treated as the embeddings of this class. After that, the authors propose to fit a Gaussian Mixture Model (GMM) on the embeddings for each class using the classical EM algorithm. Finally, the augmented embeddings are generated by sampling from the GMMs for each class, which are combined with the embeddings of training data and used for fine tuning the post-Readout function. The proposed framework enjoy a high computation efficiency since the post-Readout function contains only a linear layer and the (time) complexity of fitting GMMs is linear. The authors also provide some theoretic analysis. First, they analyze the excess risk of the graph augmentation approach and the result shows that minimizing the expected distance between original graphs and augmented ones could reduce the excess risk. Second, they use influence functions to quantify the affect of augmented data on model's performance on testing data. Experimental results show that the their proposed method has competitive performance against baselines and has a significant benefit on advantages in robustness against structure corruption and time complexity." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Although using a generative models to learn the graph representation distribution is reasonable, the motivation of adopting GMMs is still unclear. GMM is universal approximator of densities could be one of the reasons, yet how many component need to achieve a small approximation error is unknown. 
Besides, the theoretic results provided in this paper do not seem to explain why this method that generating augmentations in representation space is superior or comparable to previous methods that generating augmentations in data space, which could be a promising direction to be explored." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024gaussian,\ntitle={Gaussian Mixture Models Based Augmentation Enhances {GNN} Generalization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0Th6bCZwKt},\nnote={under review}\n}" }, "abstract": { "value": "Graph Neural Networks (GNNs) have shown great promise in many learning tasks, notably including node and graph classification, but they face difficulties when tested on new or unseen data. These challenges are exacerbated when training data is limited in size or diversity. To address this issue, we introduce a theoretical framework using Rademacher complexity to compute a regret bound on the generalization error and then characterize the effect of data augmentation. This framework informs the design of GMM-GDA, a new, efficient graph data augmentation (GDA) algorithm leveraging the capability of Gaussian Mixture Models (GMMs) to approximate any distribution. Our approach not only outperforms existing augmentation techniques but also offers improved time complexity, making it highly suitable for real-world applications." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Graph Neural Networks", "Data Augmentation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/a0dab2c6826119f891b03abddd950bfb82dfcc66.pdf" }, "presentation": null, "primary_area": { "value": "learning on graphs and other geometries & topologies" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/ab81bf5c82e01d8ef4c2aa8c84ceab95b004690b.zip" }, "title": { "value": "Gaussian Mixture Models Based Augmentation Enhances GNN Generalization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0UCkWfcfb9
OPTune: Efficient Online Preference Tuning
main
Active
Efficient RLHF; Online DPO;
foundation or frontier models, including LLMs
3;3;5;5;5
3;5;4;3;4
2;1;3;3;3
2;1;2;2;2
1;2;3;2;3
4.2
3.8
2.4
1.8
2.2
-0.218218
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. A choice of small $\\rho$ can speed up the training process. However, we may improve the efficiency by directly reduce the training epochs while enlarging the learning rate, which may bring more significant speedups. Will an online training method with dynamic learning rate adjustment have better efficiency?\n2. Samples whose reward gap between positive and negative responses is high may dominate the learning loss. Does the training curve show more significant instability than DPO?\n3. Will the un-re-generated responses be over-optimized?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. OPTune achieves notable computational savings in data generation and training, reducing costs for online RLHF while preserving alignment quality.\n2. By focusing on low-reward prompts, OPTune avoids unnecessary regeneration, which is a pragmatic approach to improve efficiency.\n3. Using weighted DPO loss changes binary signals to dense signals, improving improving alignment through prioritizing high-utility samples." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper targets LLM alignment with human preferences in an online manner. 
OPTune involves two main strategies to reduce computational costs while maintaining alignment quality, including selective generation and weighted DPO loss. The authors conduct experiments using OPTUNE with LLMs and report a 1.27–1.56x speedup in training while maintaining or even improving model alignment." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The choice of the ratio of re-generated prompts $\\rho$ can be a key factor of OPTune. Though the authors conduct experiments with different $\\rho$s, the authors do not provide direct insights on how to choose $\\rho$ to balance between efficiency and performance.\n2. Online DPO (without weighted loss) should be the most related baseline for this paper. Though some experiments are conducted, the authors do not sufficiently evaluate OPTune's superiority over online DPO.\n3. In Table 3, the performance in TrustfulQA is incorrectly bold. The offline DPO model has higher performance.\n4. The choice of $\\beta_2$ in the weighted loss is significant while the authors do not reveal any insight or related experiments on it." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "How to rank the response pairs? Do you use the average rewards of preferred and less preferred responses? 
Is there a better prompt selection method suitable for wDPO?\n\nRegarding the experimental configuration in Table 1: How many responses were generated for each prompt?\n\nDid the author observe overoptimization reusing the same prompts for each iterations?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well-written and well-orginized.\n\nConsidering online DPO takes more time than the original offline method, improving its efficiency is of great significance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper propose OPTune, an approach to enhance the both generation and training efficiency of online preference tuning for LLMs alignment.\nTo improve the generation efficiency, OPTune selects prompts whose regenerated responses are likely to provide more informative and higher-quality training signals.\nIn addition, weighted DPO is proposed to improve the training efficiency by modelling the reward gap of response pairs.\nEmpirical results show that LLMs tuned with OPTune maintain instruction-following benefits and achieve faster training speeds compared to standard preference tuning methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Given that iterative DPO often utilizes different prompts in different iterations [1] for avoid overfitting or overoptimization [2], it is not clear how the proposed method can be used in such scenarios.\n\nThe performance of the models corresponding to different selection ratios in Table 2 is not very different and is generally low, which cannot explain the effectiveness of the method.\n\n\nReferences:\n\n[1] Meng Y, Xia M, Chen D. Simpo: Simple preference optimization with a reference-free reward[C]. 
NeurIPS, 2024.\n\n[2] Rafailov R, Chittepu Y, Park R, et al. Scaling laws for reward model overoptimization in direct alignment algorithms[J]. arXiv preprint arXiv:2406.02900, 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Would it be possible to provide some sort of conceptual grounding for your proposed prompt selection strategy? I could imagine a connection to the pessimism principle in RL." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Show across multiple experiments that the proposed strategy outperforms a random selection strategy." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a more generation-efficient procedure for online RLHF by preferentially sampling responses from prompts that had low rewards and weighting samples by the reward gap in the online DPO loss." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Lack of relevant baselines on sample selection: a pretty common strategy in RLHF is to pick prompts that had the largest \"margin\" between the winner and the loser for further training (e.g. https://arxiv.org/abs/2404.03715). 
Could you compare your strategy against this technique?\n\n- Lack of relevant baselines on policy optimization: a variety of papers have already noted that IPO / DPO ignore the gap in reward between the winning and losing completions. Could you compare against at least one of these (e.g. REBEL: https://arxiv.org/abs/2404.16767)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Why didn’t the authors choose a stronger model from the Zephyr series as a baseline, or conduct comparisons with other models like Llama-3-Instruct-8B-WPO-HB-v2?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The writing is relatively clear." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the high-cost issue of online alignment. It proposes a method aimed at improving the efficiency of online alignment, specifically consisting of two parts. First, only the lowest-rewarded responses generated under the latest LLM policy are regenerated and updated. Second, the loss function is modified to assign higher weights to response pairs that contribute more during training. \nSimilar methods to the two improvements proposed in this paper have already emerged within the community. 
Further, the experimental section lacks a proper evaluation of the improvements due to the choice of an outdated and subpar baseline Zephyr 7B Beta (Alpaca eval rank 131)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Lack of Innovation\nOver the past year, the alignment community has proposed numerous methods similar to those used in this paper. As early as the Llama 2 Technical Report, the approach of directly incorporating the score difference between two responses into the loss function was introduced. Although the Llama 2 Technical Report is cited in the Related Work section, there is no comparative discussion with Llama 2 or other similar works in Section 3.2.\n2. Incomplete Experiments and Lack of Analysis\nThis paper introduces a scaling factor, beta 2, to amplify the score difference and combines it with the original DPO loss function via multiplication rather than addition. However, the motivation behind this approach is not explained. More importantly, there is no ablation study to compare the impact of different values for the scaling factor beta 2 or other ways of incorporating score differences.\n3. Unconvincing Choice of Baseline\nThe entire experimental section only includes the Zephyr model as a baseline, with no comparisons to other baselines. \nCurrently, on the alpaca_eval board(https://tatsu-lab.github.io/alpaca_eval/) Zephyr-7B-Beta has a win rate of 13.2%, ranking 131st. When controlling for model size and listing only models with 8B or fewer parameters, there are other fine-tuned models based on comparable foundation models. These include the Gemma series (e.g., Gemma-2-9B-it-SimPO, rank 8; Gemma-2-9B-it-DPO, rank 10), Llama3-based fine-tuned models like Llama-3-Instruct-8B-WPO-HB-v2 (rank 20), and Mistral 7B-based models like Storm-7B (rank 24). 
Without comparisons to any of these other similarly sized fine-tuned models, the paper’s conclusions are difficult to accept.\n\nLongitudinally, the Zephyr model has several later versions, including FsfairX-Zephyr-Chat-v0.1 (rank 50, LC win rate 34.8%), ExPO + Zephyr 7B Beta (rank 128, LC win rate 14.0%), and Zephyr 7B Beta (rank 131, LC win rate 13.2%). The paper only selected Zephyr 7B Beta, which ranks last among these, as its baseline, with a win rate only 40% of the current best Zephyr model. Additionally, instead of using the common win rate metric, the paper employs win score, making it difficult to directly compare the performance of Optune against existing models." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "see above" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The experiment is performed on a 7B model and the result suggests that with a right ratio for regeneration, the proposed method indeed improves training efficiency without decreasing the performance.\n2. The experiment has reasonable comparison with random subselection and shows that random subselection does not work as well as the proposed method." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes two improvement for online preference learning: one is reward-based prompt selection, and the other one is weighted DPO. For reward-based prompt selection, the paper proposes that between each iteration of online DPO, one should only regenerate certain proportion of the reponses to the prompts that have the lowest rewards. For weighted DPO, the paper proposes to add a weight term in the DPO loss based on the reward difference in each pair of generation. The paper performs experiments to show that, with reward-based prompt selection, for both DPO and wDPO loss, selecting the right portion of regeneration will improve the efficiency of online DPO without sacraficing the performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. It is unclear to me that ranking the prompts by absolute reward makes sense, especially if the reward model is trained by BT loss. For each fixed prompt, the BT loss only cares about the difference between two responses, so difference prompts may induce a difference biased of the corresponding completion. Thus having a low reward does not necessarily mean that the model is currently performing bad on the prompt. Honestly I might be describing the procedure wrong because I don't see a clear definition of \"ranking prompt by reward\" unless I am missing something.\n\n2. Also it is unclear to me why the wDPO loss makes sense. If the reward gap between two generations are large, it is likely that the pair is already easy for the model, and the term might not even contribute to the training - why not doing the inverse weight?\n\n3. There is no information how table 1 is generated, and it seems like it is the major motivation for the proposed method. 
More details should be provided, especially to show that all procedures are fully optimized - for example, for generation it seems that using vllm to speed up the inference is the common approach.\n\n4. Frankly, the paper lacks basic rigors. \n- In section 3.1, the important concept of \"reward gain\" is not defined so the motivation part is very confusing.\n- In line 7 of alg 1, the prompt $x^i$ is already popped, then from line 12 should we never see the recently added pairs in $\\mathcal{R}_t$?\n- In line 21 of alg 1, how is the ranking computed?\n- nits: a) in eq (2), the two terms inside KL are not distribution. 2) eq (2) uses $\\alpha$ and the following uses $\\beta$. 3) in line 167 $\\mathcal{P}$ is not defined." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024optune,\ntitle={{OPT}une: Efficient Online Preference Tuning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0UCkWfcfb9},\nnote={under review}\n}" }, "abstract": { "value": "Reinforcement learning with human feedback~(RLHF) is critical for aligning Large Language Models (LLMs) with human preference.\nCompared to the widely studied offline version of RLHF, \\emph{e.g.} direct preference optimization (DPO), recent works have shown that the online variants achieve even better alignment. \nHowever, online alignment requires on-the-fly generation of new training data, which is costly, hard to parallelize, and suffers from varying quality and utility.\nIn this paper, we propose a more efficient data exploration strategy for online preference tuning, OPTune, which does not rely on human-curated or pre-collected teacher responses but dynamically samples informative responses for on-policy preference alignment. 
\nDuring data generation, OPTune only selects prompts whose (re)generated responses can potentially provide more informative and higher-quality training signals than the existing responses. \nIn the training objective, OPTune reweights each generated response (pair) by its utility in improving the alignment so that learning can be focused on the most helpful samples. \nThroughout our evaluations, OPTune'd LLMs maintain the instruction-following benefits provided by standard preference tuning whilst enjoying 1.27-1.56x faster training speed due to the efficient data exploration strategy." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Efficient RLHF; Online DPO;" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/87ff1a3a8c554e664191230af78fed300b67a133.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "OPTune: Efficient Online Preference Tuning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0UCoWxPhQ4
SAVA: Scalable Learning-Agnostic Data Valuation
main
Active
Data Valuation;Optimal Transport;Data Selection;Active Learning
other topics in machine learning (i.e., none of the above)
5;6;6
3;4;4
3;3;3
2;3;3
3;3;3
5.666667
3.666667
3
2.666667
3
1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Other than hierarchical OT and the proposed implementation, there are some other ideas for mitigating OT efficiency issues.\n\nSome standard approaches include low-rank approximation to the transportation matrix C, which is often possible for practical cases. This allows representing the large matrix C with multiplication of smaller matrices and avoids directly materilizing the large matrix C and OOM issues. \n\nAnother somewhat connected idea is to directly quantize the train and validation distributions (e.g., approximate the distributions via downsampling) to simplify the OT problem.\n\nHierarchical OT can also be conducted with clustering methods. For example, at the lower level, group all the samples into a number of clusters, and at the higher level, solve the OT problem between the centroids of clusters. \n\nIt will be very interesting to see how to connect the proposed framework to these ideas and whether they may help further improving the computation complexity or accuracy." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The problem is well-contextualized and the motivation is clear. Structure of the paper is well balanced and the elaborations are coherent. 
It is straightforward for readers to understand the scope and target of the paper and the proposed technical approaches.\n\nThe proposed method is plausible, leveraging the hierarhical OT framework to aggregate results from batch-wise OT computations and achieving favorable approximation results.\n\nDerivations are comprehensive and are paired with substantial elaborations. Empirical evaluations are diverse and abundant and the results are valid." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates of the problem of extending Optimal Transport (OT) distance-based data valuation methods for larger scale problems. The paper points out that for current methods, the quadratic overhead for expensive GPU memory constrained the scale of problems they can be applied to. Correspondingly, this paper proposes to compute the OT problem in a batch-wise fashion where the batch-wise results are aggregated via an hierarchical OT framework to produce data point valuations. This approach allows converting intractable large-scale OT problems into a series of smaller problems that can be solved efficiently. Empirical results on a variety of tasks show the proposed approach achieves competitive performance compared to original methods while being applicable to larger-scale problems." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I am still somewhat concerned about the computation overhead for SAVA. Even it avoids directly solving large-scale OT problems and circumvents OOM issues, it now requires solving a quadratic number of OT problems between every pair of batches and aggregating their results. This could also take a significant amount of time if the number of batches are high.\n\nAre there results on actual time comparisons for the methods in empirical studies?\n\nThe structure of the paper still has room to improve. 
The current layout is dense where there are many equations and lemmas interleaved with elaborations. There's an overhead for the readers to familiarize with the notations before being able to catch up with the ideas. It could be made more straightforward.\n\nFor example, the crucial Figure 1 and Algorithm 1 are not self-contained. Many of the involved notations are not straightforward and also not introduced in the captions. It still requires readers to first read through the texts and derivations to understand what is being done. Strongly suggests authors to make an effort to improve these visualizations, which could substantially improve the paper's accessibility and impact." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The experimental results are convincing. The authors compared to SOTA methods for data valuation across various data corruption scenarios. The results demonstrate that SAVA is scalable to large datasets. Also, the results included a dataset of size larger than 1 million samples, in which the proposed method outperforms benchmarks. \n\n- The writing is good and easy to follow." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper develops a variant of LAVA, called SAVA, for scalable data valuation. The idea is perform data valuation on batches of data instead of on the entire dataset. Extensive numerical results are presented to demonstrate SAVA's efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The reviewer's biggest concern is related to novelty. Currently, SAVA seems a very natural extension of LAVA for data valuation on batches. The submission seems to be on the incremental side, unless the authors can clearly state the technical challenge when calculating on batches. \n\n- The choice of batch size is a key hyper-parameter in SAVA (and key difference to LAVA). The authors are suggested to include formal theoretical analysis to quantify the tradeoff in choosing batch size between memory and calculation approximation. Also, Appendix G should appear in the main text. \n\n- The authors are suggested to include a table comparing the complexities of LAVA and SAVA. \n\n- What happens if the validation dataset gets corrupted? \n\n- In Fig. 3, why is the performance of SAVA dropping at .4 proportion?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "pls see W2" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "[S1] An interesting approach leveraging the idea of batches to solve the memory bottleneck encountered in OT solver as optimizer in model training.\n\n[S2] Detailed theoretical proofs and descriptions of previous work are given.\n\n[S3] The article is well-organized and easy to read." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a new learning-agnostic data valuation approach that assigns a value to each data point in the training set based on its similarity to the validation set. They introduce SAVA, a scalable variant of the LAVA algorithm, which uses optimal transport (OT) sensitivity to value training data without directly concerning model performance efficiently. Unlike LAVA requiring the entire dataset as input, SAVA operates on batches of data points, making it has a smaller memory consumption. Thus, SAVA can make the valuation taks of larger datasets possible. The authors conduct extensive experiments showing that SAVA can effectively scale to large datasets and maintain data valuation performance across various downstream tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "[W1] My biggest concern is the proof of the upper bound does not adequately explain why this proxy can work.  Detailed analysis on the upper bound of the proxy practicability should be taken.\n\n[W2] My second concern is that the paper lacks of time complexity analysis. 
And SAVA in Figure 2 seems to be no better than Batch-wise LAVA. In the appendix Figure 9, why not compare Batch-wise LAVA in running time metric? \n\n[W3] Typos: Line 417, \"Batch-wise LAVA KNN Shapley and\" -> \"Batch-wise LAVA, KNN Shapley, and\"\n\nPlacing Table 1 in Section 2 would help to improve understanding." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We use hierarchical optimal transport (OT) to scale OT-based data valuation methods to large datasets." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024sava,\ntitle={{SAVA}: Scalable Learning-Agnostic Data Valuation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0UCoWxPhQ4},\nnote={under review}\n}" }, "abstract": { "value": "Selecting data for training machine learning models is crucial since large, web-scraped, real datasets contain noisy artifacts that affect the quality and relevance of individual data points. These noisy artifacts will impact model performance. We formulate this problem as a data valuation task, assigning a value to data points in the training set according to how similar or dissimilar they are to a clean and curated validation set. Recently, LAVA ~\\citep{just2023lava} demonstrated the use of optimal transport (OT) between a large noisy training dataset and a clean validation set, to value training data efficiently, without the dependency on model performance. However, the LAVA algorithm requires the entire dataset as an input, this limits its application to larger datasets. Inspired by the scalability of stochastic (gradient) approaches which carry out computations on batches of data points instead of the entire dataset, we analogously propose SAVA, a scalable variant of LAVA with its computation on batches of data points. Intuitively, SAVA follows the same scheme as LAVA which leverages the hierarchically defined OT for data valuation. 
However, while LAVA processes the whole dataset, SAVA divides the dataset into batches of data points, and carries out the OT problem computation on those batches. We perform extensive experiments, to demonstrate that SAVA can scale to large datasets with millions of data points and doesn't trade off data valuation performance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Data Valuation", "Optimal Transport", "Data Selection", "Active Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/e3af73b698ac1d78645582d3b4b6f61d7e6cfdaa.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/49b6233f64cba4a34dbc687ab2f0c11946912804.zip" }, "title": { "value": "SAVA: Scalable Learning-Agnostic Data Valuation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0ULf242ApE
From Context to Concept: Concept Encoding in In-Context Learning
main
Active
mechanistic interpretability;in-context learning;large language models
interpretability and explainable AI
3;5;5;6
4;2;4;4
3;3;3;3
2;2;2;3
2;3;2;4
4.75
3.5
3
2.25
2.75
-0.132453
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What specific hypotheses are being tested in each experiment? Could you clarify these? \n2. How does each experiment contribute to the overall research question? Can you make these connections more explicit?\n3. Are the findings entirely new, or do they replicate previous results? It would help if you clearly identified which results are reproductions and which are novel insights.\n4. In line 146, you mention that concept encoding and decoding are \"mutually reinforcing.\" Could you provide more evidence or context to support this claim? It may currently come across as overgeneralized.\n5. How does adding sparsity constraints to the sparse linear regression task enhance the study? Could you explain this addition’s significance in more detail?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The study addresses an interesting and practical research question: understanding the mechanism behind in-context learning (ICL) in LLMs. This is an intriguing problem from a scientific perspective and has important implications for real-world applications.\n- The authors designed a simple yet reasonable synthetic task to explore the model's emergent behavior in concept encoding and decoding. 
Although straightforward, the task is well-suited to investigate the research question.\n- By training a small GPT model from scratch and prompting the Llama 8B model, the authors effectively examined the hidden representations of LLMs, revealing the coupled emergence of concept encoding and concept decoding." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors investigate how concept understanding develops within transformers during training by studying a small model on synthetic tasks designed for in-context learning (ICL). They found that as the model learns to represent different underlying concepts (like identifying parts of a sentence), it also builds ways to decode these concepts, leading to better ICL performance. Examining a larger, pretrained model (Llama-3.1-8B), they show that its ability to encode concepts directly impacts its ICL effectiveness. Using techniques like controlled fine-tuning and targeted interventions, they demonstrate that improving concept encoding helps the model perform better on ICL tasks. They also experiment with prompting, finding that it can enhance concept encoding and ICL performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The research question and task design draw on prior related work, which may limit the novelty of this work on these points. (But I still want to emphasize that the authors add an interesting twist by incorporating sparsity constraints into the sparse linear regression task, which is a valuable contribution of this work. )\n- The hypotheses for each experiment and their specific contributions are not entirely clear. 
It is difficult to discern what each experiment aims to verify and whether the findings are novel.\nFor example, in line 146, the authors state, \"we demonstrate the emergence of concept encoding and decoding are mutually reinforcing.\" However, the experimental results lack sufficient evidence to support this claim, which may make this assertion seem overstated. I would encourage the authors to clarify their findings throughout the paper, clearly distinguishing between reproductions of prior work and novel insights, whether in synthetic tasks or large-scale experiments." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "This work explores an interesting and relevant topic while providing constructive insights into ICL training. However, the authors should improve the paper structurally by having clearer contribution highlights and a more rounded conclusion paragraph. Moreover, the results concerning number of demonstrations, fine tuning and the connection between the CD and ICL accuracy come as unexpected and are hindering the paper's contributions. \n\nGiven everything considered, I would be open to raise by score if the authors address the questions stated under the “Weaknesses” section and the following ones:\n- In the synthetic experiments in section 3, layer 5 is analyzed closely. 
Why was this specific layer chosen, and how do observations vary across other layers?\n- Can you please provide more details on the training setup for the synthetic experiments in section 3. Do you train 4 different models for different betas, or is it the same model? If it is the same model, how do you perform UMAP?\n- Have you considered testing the encoding - decoding capabilities across different models and tasks to show the generality of the encoding mechanism in large language models? Can the same be observed in multi-modal models?\n- Have you tested how the model handles more complex tasks, multi step tasks or tasks where concept overlap exists? Could you perform additional experiments to show the scenario with overlapping concepts?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- **S1:** The paper studies an interesting area of ICL, where the authors propose a new perspective on the training dynamics of ICL by showing the existence of a two-step encoder-decoder mechanism within the transformers. \n- **S2:** The paper is well-designed with sound experiments and with the study starting from synthetic and simpler tasks on a smaller model, and extending to the similar trends on a larger, real-world model and NLP tasks. Authors further conduct additional analysis based on model patching and fine tuning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the training dynamics of in-context learning (ICL) in transformers by analyzing their representations. It demonstrates the emergence of concept encoding, where the model first encodes the latent concepts in the representation space, and decoding, where the model applies a selective algorithm. 
They initially show the existence of concept encoding-decoding on experiments with synthetic tasks using a smaller autoregressive model on a mixture of sparse linear regression tasks. The same concept encoding-decoding mechanism exists in the pretrained Llama-3 model, where the authors show that concept encoding holds for POS tagging and bitwise arithmetic tasks as well. The study further shows a causal link between concept decoding capabilities and ICL performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **W1:** While the paper is well written overall, the introduction does not emphasize the main contributions and it is challenging to identify the importance of key insights from the beginning. Next, the paper closes with a brief discussion but lacks a fully rounded conclusion. \n- **W2:** Figure 1 is hard to interpret since the same markers are used for different LSE and Lasso regression. Moreover, there is also unnecessary whitespace around Figure 1. Finally, I believe there is a typo in row 377 and the text should refer to Figure 5.\n- **W3:** Although the study evaluates both synthetic and real-world tasks, the real-world experiments are limited to a single model (Llama-3.1-8B) and two relatively simple tasks, which raises concerns about whether the concept encoding-decoding mechanism will generalize to more complex or realistic tasks and larger models. Additional experiments on diverse or harder tasks could strengthen the evidence for generalizability.\n- **W4:** The paper shows unsurprising and expected results on Figure 3 and Figure 8. The finding that increased number of demonstrations lead to better encoding and decoding seems expected, as more examples provide more “learning” signal, which is observed for few-shot learning problems. 
\nNext, observing that some tasks fail to achieve high accuracy and Concept Decodability (CD) due to representation limitations aligns with existing research about the generalization capabilities of ICL and ICL failing in cases when the new or similar-enough task was not observed so frequently during pretraining, which is commented in the Discussion section.\nFinally, the observation that fine tuning the model improves CD and ICL accuracy is not unsurprising as the representation subspaces are aligned and the ICL task is now the same as the IWL task due to fine tuning." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- In some recent work, Mittal, et al. [3] suggest that inferring latent variables doesn't necessarily improve ICL performance, and that the \"task vector\" view of ICL may be due to parametric shortcuts that are learned by transformers for certain tasks. I'm curious whether this paper's findings complement, support, or contradict this argument.\n___\n[3] Mittal, et al. Does learning the right latent variables necessarily improve in-context learning? 2024. 
(https://arxiv.org/pdf/2405.19162)" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well written and easy to follow, and the figures clearly communicate the experimental results. The authors test a variety of tasks to show the generality of the findings, though they are relatively simple in nature. There are several lines of evidence that support the paper's conclusions, and it appears there is an appropriate amount of caution in presenting results the authors are unsure about." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies in-context learning in transformer models through the bayesian lens of concept inference. \nThey find that in a transformer trained on synthetic data, the model learns to separate tasks in its representation space, and this separation is important for task-specific prediction. They also study how concept encoding and decoding behavior emerges in transformers pre-trained on natural language, and find similar results." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The tasks studied here are rather simple. When tasks become more complicated, it's unclear whether the task-vector (and thus) concept inference hypothesis will hold. For example, in the synthetic setting, what happens as you increase the number of latent concepts to be learned? Do you find that more latent concepts causes the encoding to become less task-separable?\n\nSome of the experiments lack details that might help clarify some confusion/help with future replication. In particular, I have questions about the experiments in 4.3 and 4.4:\n\n- The experiment described in section 4.3 seems almost identical to the intervention experiment done by Hendel, et al. 
[1] to validate the \"task vector\" hypothesis (at least the positive case), but is missing experimental details. Are there any other differences in this setup besides the tasks, and testing with a \"null\" task vector? (e.g. do you patch at the final token/multiple tokens, \n\n- For the fine-tuning experiments in section 4.4, how can we be sure that the performance increase is due to the \"concept encoding\" and not something else? Can you describe your fine-tuning experiment setup in a bit more detail? Is each of these subplots a separate fine-tune, or do you fine-tune the layer set on all tasks at once? There are usually also performance gains for fine-tuning the last 10 layers as well. While not stated, it might be worth clarifying whether this paper's hypothesis for this increase is that fine-tuning the final layers strengthens the concept decoding capabilities of models.\n\nThe results could also be strengthened by showing these results hold across other model sizes and model families, since the only pretrained LLMs this paper studies is Llama 3 8B (with some training checkpoints results on OLMo-7B). I'd be curious how separability of the representations change across model sizes of the same family (for example - Llama 8B & 70B), or Pythia (Biderman, et al. [2]). Though, as it stands, the current results are reasonable evidence for the claims made.\n\n___\n\nMinor Notes (not worried about this, but just noticed while reading through):\n- Misspelling in Line 297: \"overt\" -> \"over\"\n- Mis-capitalization in Line 790: In this section, \"We\" -> we\n- Misspelling in Line 862: \"synthetinc\" -> synthetic\n\n___\n[1] Hendel, et al. In-Context Learning Creates Task Vectors. 2024. (https://aclanthology.org/2023.findings-emnlp.624/)\n\n[2] Biderman, et al. Pythia: A Suite for Analyzing Large Language Models Across Training and Scaling. 2023. 
(https://proceedings.mlr.press/v202/biderman23a.html)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Why does context decodability peak in the middle layers and go down afterwards? \n* In the first experiment, you perform UMAP on the layer activity to find the clusters. UMAP often exaggerates differences. Does kNN classification work here too?\n* When you talk about \"layer activations\", do you mean residual stream representations, or the output of the transformer layers, which are added to the residual stream?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* Research question is timely.\n* The authors perform multiple experiments both with toy models and LLMs.\n* The experiments and analyses are conceptually simple and neatly replicate findings from Hendel et al. 2023.\n* Multiple types of interventions were used to validate the efficacy of the task representations, including fine-tuning and activation patching." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors show that Transformers learn to solve certain tasks in-context by inferring/embedding contexts in separable representations. 
Conditioning in these task variables allows the Transformer to accurately predict in-context. The authors show this in experiments conducted both on toy-models trained to perform linear regression and on large-scale Transformers like Llama 8B. They also show that task separability correlates with ICL accuracy. This connection is also demonstrated using activation patching and fine-tuning on selective parts of the Transformer models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the experiments and analyses are sound, the results seem more like a replication of the findings from Hendel et al. 2023 or Todd et al. 2023. It is not clear how the concept encoding/decoding framework differs from the Task Vector framework, or why it is necessary to use the concept encoding/decoding framework in the first place. Would it not be fair to characterize the separable representations of the tasks in the ICL experiments as task vectors?\n\nAs such, the findings do not seem very novel or surprising in light of previous papers, like Hendel et al. 2023. While the results presented are interesting, I think the paper would benefit a lot from showing something that hadn't already been shown in previous works. For instance, the analysis showing that fine-tuning early layers of Llama improves the concept separability goes in this direction. At the very least, the authors can help explain why the findings are novel, why their framework is needed, or why performing experiments the way they were done improves our understanding of ICL beyond previous papers. Currently, the paper reads like it gives more credible evidence to the existence of task vectors, which is nice, but it choses to call it 'Concept encodings' instead, which is confusing and seems unnecessary.\n\nThe writing and explanations can also be improved in various places. The framework that is proposed, which makes reference to 'concepts' is confusing. 
Why not just stick with existing terminology like task vectors? The term 'Concept' is very loaded, and it is not clear that it adds anything to modelling ICL here. \n\nAt the same time I think the paper gives some nice supplementary evidence for the existence of task vectors. I would be willing to increase my score if the authors can address the above criticisms.\n\nThere are also some typoes and weird formulations:\n* Line 122 \"Bayeisan\"\n* Line 75, 261, 269 \"solve ICL\" doesn't seem quite right. The models learn to perform ICL, but ICL is not solved.\n* Line 161 \"over the sequence of sequence of context length 20\"\n* I don't think the quote at the beginning of the paper adds anything and I would recommend removing it." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We demonstrate that transformers learn to encode latent concepts into distinct representations to learn concept-dependent decoding algorithms, and their ability to distinguish these concepts predicts their in-context learning performance." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024from,\ntitle={From Context to Concept: Concept Encoding in In-Context Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0ULf242ApE},\nnote={under review}\n}" }, "abstract": { "value": "Humans distill complex experiences into fundamental abstractions, enabling rapid learning and adaptation. Similarly, autoregressive transformers exhibit adaptive learning through in-context learning (ICL). This raises the question of how abstractions play a role in ICL and are represented within the models. In this paper, we investigate how this mechanism emerges in transformers during training by studying their representations. By observing the training dynamics of a small transformer on synthetic ICL tasks, we show the coupled emergence of concept encoding and concept decoding. 
As the model learns to encode different latent concepts (e.g., \"Finding the first noun in a sentence.\") into distinct, separable representations, it conditionally builds decoding algorithms and improve its ICL performance. Based on this mutual dependency, we hypothesize that the model's ability to discern between the latent concepts is predictive of downstream ICL performance. We empirically characterize that a pretrained Llama-3.1-8B also exhibits concept encoding abilities. Moreover, with mechanistic interventions and controlled finetuning, we reveal that the accuracy of concept inference is causally related to ICL performance. Our empirical insights shed light into better understanding the success and failure modes of large language models via their representations." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "mechanistic interpretability", "in-context learning", "large language models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/c8c49dadd20415a519e857795508a86ec01c84b5.pdf" }, "presentation": null, "primary_area": { "value": "interpretability and explainable AI" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. 
If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "From Context to Concept: Concept Encoding in In-Context Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0UO1mH3Iwv
Edge-aware Image Smoothing with Relative Wavelet Domain Representation
main
Active
Image smoothing;Wavelet transformation;Relative wavelet domain representation;Edge-preserving;Non-convex optimization
optimization
6;6;6
4;5;3
3;3;3
2;3;3
3;3;4
6
4
3
2.666667
3.333333
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Could the authors provide an online demo to allow users to test the method easily? While it’s not essential for acceptance, it would add value for potential users." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper introduces relative wavelet domain representation into bilateral filtering, which is reasonable and novel.\n\n2. The method achieves superior visual results compared to previous studies. \n\n3. The paper includes comprehensive theoretical derivations, technical descriptions, and runtime analysis of the algorithm." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The main contribution of this work is the introduction of RWDR that effectively distinguishes textures from primary structures and preserves weaker edges. Additionally, the paper proposes an innovative edge-aware scale map method that dynamically adjusts scale based on the image structure, resulting in clearer distinctions between structure and texture. Experimental results demonstrate that the proposed approach provides superior edge-preserving smoothing compared to existing methods." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper provides extensive visual results, but I’m curious how different algorithms are objectively evaluated based on visual quality. The authors should consider comparing performance on downstream tasks with objective metrics. A user study could also statistically confirm the advantages of the proposed method.\n\n2. As a new method, it likely performs well in certain scenarios. However, I am more interested in its robustness and stability. In other words, can the authors provide a lower bound for the algorithm's performance? In which scenarios might it fail? Additionally, how sensitive is the algorithm to parameter changes?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please check the weakness section." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The solution of the proposed model is supported by a complete theoretical guarantee, which is a strong point.\n2. 
Extensive experiments prove that the proposed method outperforms existing algorithms in mitigating gradient reversals,\n staircase artifacts, and halos and achieves a superior performance in balancing smoothing strength and edge preservation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The author introduces a mutually guided edge-aware smoothing model based on relative wavelet domain representation. Their proposed RWDR serves as a novel measure for effectively differentiating between textures and structures." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Though the authors support their claims by extensive qualitative results, but they should also provide the quantitative results to validate their points in the main paper or at least in the supplementary. For instance, the authors can include PSNR (Peak Signal-to-Noise Ratio), SSIM (Structural Similarity Index), or MSE (Mean Squared Error) on standard synthetic benchmark datasets and LPIPS, MUSIQ, NIQE, MANIQA for real-world tasks. This would allow for a more objective comparison with existing method.\n2. The method section needs to be refined, as mentioned in Fig1 (that the detail enhancement image is boosted by four detail layers), this statement is not explained in the method section, how the four detail layers are being generated , is it from the wavelet decomposition?\n3. The paper has lacks ablation study. The authors have given mathematical proofs of choosing the particular operations like RWDR and the edge-aware scale map, they should also try to prove the effectiveness of each proposed component on the overall model.\n4. 
The authors should also try to report the results on some real-world applications in Super-Resolution (RealSR, DrealSR,RealLR200), denoising (SIDD, DND) that would further prove the use of the proposed model, if the time permits and should also check on synthetic SR datasets like Manga109, Urban100, and BSD68 (for denoising)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.In the experimental part of this paper, there is a predominance of qualitative analysis of images. However, due to the significant subjective factors inherent in qualitative experiments, supplementing with more quantitative experiments would enhance the persuasiveness of the results; 2.Image smoothing operations, as one of the fundamental image processing tasks, play a crucial role in various visual tasks. However, the paper seems to lack exploration of specific visual tasks (for example, in super-resolution tasks, the textures and structures preserved after image smoothing are vital for image reconstruction)." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1.This paper proposes a relative wavelet domain representation and an edge-aware smoothing model, achieving certain progress in image smoothing technology; 2.The paper utilizes extensive theoretical proofs to establish a mathematical model for the relative wavelet domain. The experimental validation is well-supported by theory; 3.The writing of this paper is relatively fluent and conforms to the standards of English academic writing." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this article, the author reviews image smoothing methods based on local information, global information, and deep learning, and discusses the limitations of current image smoothing techniques when dealing with image textures and image structural edges. To address this issue, first, the author proposes a novel edge-aware smoothing model that more effectively distinguishes between image textures and image structures through relative wavelet domain representation (RWDR). Second, the author reintroduces edge-aware scale maps into bilateral filters to improve image edges during the smoothing process. Finally, the author demonstrates the superiority of this method in texture preservation and artifact removal after image smoothing through comprehensive theoretical derivations and experimental results compared to other algorithms." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.In the experimental part of this paper, there is a predominance of qualitative analysis of images. 
However, due to the significant subjective factors inherent in qualitative experiments, supplementing with more quantitative experiments would enhance the persuasiveness of the results; 2.Image smoothing operations, as one of the fundamental image processing tasks, play a crucial role in various visual tasks. However, the paper seems to lack exploration of specific visual tasks (for example, in super-resolution tasks, the textures and structures preserved after image smoothing are vital for image reconstruction)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024edgeaware,\ntitle={Edge-aware Image Smoothing with Relative Wavelet Domain Representation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0UO1mH3Iwv},\nnote={under review}\n}" }, "abstract": { "value": "Image smoothing is a fundamental technique in image processing, designed to eliminate perturbations and textures while preserving dominant structures. It plays a pivotal role in numerous high-level computer vision tasks. More recently, both traditional and deep learning-based smoothing methods have been developed. However, existing algorithms frequently encounter issues such as gradient reversals and halo artifacts. Furthermore, the smoothing strength of deep learning-based models, once trained, cannot be adjusted for adapting different complexity levels of textures. These limitations stem from the inability of previous approaches to achieve an optimal balance between smoothing intensity and edge preservation. Consequently, image smoothing while maintaining edge integrity remains a significant challenge. To address these challenges, we propose a novel edge-aware smoothing model that leverages a relative wavelet domain representation. 
Specifically, by employing wavelet transformation, we introduce a new measure, termed Relative Wavelet Domain Representation (RWDR), which effectively distinguishes between textures and structures. Additionally, we present an innovative edge-aware scale map that is incorporated into the adaptive bilateral filter, facilitating mutual guidance in the smoothing process. This paper provides complete theoretical derivations for solving the proposed non-convex optimization model. Extensive experiments substantiate that our method has a competitive superiority with previous algorithms in edge-preserving and artifact removal. Visual and numerical comparisons further validate the effectiveness and efficiency of our approach in several applications of image smoothing." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Image smoothing", "Wavelet transformation", "Relative wavelet domain representation", "Edge-preserving", "Non-convex optimization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6759918d765c1a588f4aa83f598a92009edf061e.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. 
If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/6dac8703505dd8267718d335e6538241dd057265.zip" }, "title": { "value": "Edge-aware Image Smoothing with Relative Wavelet Domain Representation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
0UvlnHgaii
Toward Exploratory Inverse Constraint Inference with Generative Diffusion Verifiers
main
Active
Inverse Reinforcement Learning;Generative Diffusion Model
reinforcement learning
5;5;6
3;3;3
3;2;3
3;2;3
2;2;3
5.333333
3
2.666667
2.666667
2.333333
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. My biggest confusion is about how the reward and cost are defined, respectively. Usually reward is defined as the negative cost if cost is positive, but in this paper, it seems not. Can you explicitly show how they are defined and how different they are?\n2. In section 4.2, on line 286, how is $\\phi_\\omega(s_t^i, a_t^i, i)$ defined? \n3. In section 4.3, can you explicitly give the expressions for dist$[1, \\phi_\\omega(s_t, a_t)$ and dist$[\\tilde\\phi_\\omega(s_t, a_t), \\phi_\\omega(s_t, a_t)])$?\n4. In algorithm 1, ``Updating $\\lambda$ by minimizing the loss $\\mathcal{L} = \\lambda \\mathbb{E}_{\\hat\\tau\\sim \\tilde{p}_M}[c(\\tau) - \\epsilon]$, why is no reward term involved here to update $\\lambda$? Another question related to this in Table 2: there is a significant discrepancy between the magnitudes of the Reward and Cost. Could you provide some insight into this?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Introduction clearly states the current issues in Inverse Constraint Learning and the related works section is complete.\n2. The experiments are comprehensive, demonstrating the effectiveness of the proposed approach." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes ExICL to tackle Inverse Constraint Learning problem, which aims to recover a diverse set of feasible constraints through an exploratory constraint update mechanism. The designed generative diffusion verifier utilizes the guided sampling strategy to verify the feasibility of explored constraints. This paper also aims to guarantee the robustness of feasible constraints discovery by accurately estimating the cost of noisy trajectory." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The contributions claimed in this paper are not apparent to me. Contents in 4.1 is quite close to what has been proposed in [1], and the non-convex objective theorem is inherited from [2]; the ambiguity of how things are defined in section 4.2, 4.3 impairs the significance of contributions again. There are many math notations are not defined or briefly mention. I will list each of them below in the question section. I found it confusing and hard to see how the idea works.\n2. Again, theorem 4.1 seems related to some existing conclusion from Paternain's paper [2], and this theorem is critical as it supports the zero duality gap for non-convex objective. The theorem stated in this paper is not quite the same as what is shown in [2], as the constraints here are not constant but are functions, but constants in [2]. There is supposed to be a connection shown here to support the theorem or a direct proof. A typo follows the theorem in Equation (9): $\\lambda\\epsilon$ might be missing at the end in the exponential term.\n\n[1] Janner, Michael, et al. \"Planning with Diffusion for Flexible Behavior Synthesis.\" International Conference on Machine Learning. PMLR, 2022.\n[2] Paternain, Santiago, et al. \"Constrained reinforcement learning has zero duality gap.\" Advances in Neural Information Processing Systems 32 (2019)." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- How are you selecting the constraint out of the constraint pool discovered by Ex-ICL for the experiment section?\n- Why does Figure 4's Ex-ICL figure have so much larger variance for bad trajectory cost value than other methods?\n- How sensitive are the results to exploration coefficient \\delta and exploration round m? Also, would it be instructive to showcase model performance for Ex-ICL that only searches over a single \\delta?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The idea of amortizing the ICL loop cost by pre-training a diffusion model is interesting.\n- The paper provides convincing empirical results that show the superiority of their method compared to the baselines of their experiments, both for reward and cost. It also investigates how reliable feasibility functions are on expert non and non-expert data.\n- While they have not directly demonstrated the advantages of having multiple constraint candidates returned by the algorithm (aside from possibly making search more efficient), this seems like a practical feature to have for real world use cases." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper tackles the safe reinforcement learning problem using a diffusion model and guidance to train a set of feasibility functions. Unlike traditional inverse constraint learning, which is difficult to verify whether a candidate constraint is feasible and returns a single constraint, the paper's algorithm rapidly recovers a diverse set of constraints once the diffusion model is trained on expert data. The paper's algorithm outperforms baselines on constrained mazes and Mujoco experiments regarding performance and sample efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- While the authors list computational concerns as one of the advantages Ex-ICL has over ICL, they do not conclusively show Ex-ICL's computational advantage. Figure 6 shows that Ex-ICL is more sample efficient in constraint inference, but a true test of computational efficiency should also take into account diffusion model training time.\n- The experiments on maze and Mujoco are comprehensive but are fairly simple. For example, the baseline paper [1] includes a more realistic experiment on traffic scenarios.\n- There's not enough detail in the main paper or appendix on methodology (how is \\phi parameterized?)\n\n[1] Guorui Quan, Zhiqiang Xu, & Guiliang Liu (2024). Learning Constraints from Offline Demonstrations via Superior Distribution Correction Estimation. In Forty-first International Conference on Machine Learning." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See also questions under \"weaknesses\"\n- Do the authors have some intuition why their method seems to outperform baselines significantly for HalfCheetah, marginally for Limited-Walker and only ties for Blocked-Ant?\n- In the MuJoCo experiments, is the reward presented in Table 2 the feasible reward? I.e. are rewards truncated after a constraint has been violated? It seems that that would be the more inveresting metric to report, I would recommend the authors report that metric, and if they already do so make it clear it is that metric." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The technique makes clever use of the advanatages found in diffusion techniques: being able to modify the policy at run time by applying guidance terms\n- The paper strikes a good balance of building a new method out of existing elements." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors consider inverse constraint learning, and improve on previous work by constructing an algorithm that can generate a set of constraints, and verify those constraints by applying techniques developed in diffusion modelling for RL. In particular, the authors construct a guidance term that is the gradient of a set of feasiblity terms, which they can use for on the fly verification of the proposed feasilibity functions, thereby eliminating a costly second optimization loop. The authors test their proposed algorithm on a variety of RL benchmarks." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors seem to omit some details of their mechanism, which I think are quite crucial to the paper. These are:\n- how is reward treated? Is a separate reward model that is (1) differentiable, and (2) conditioned on diffusion time (i in the author's notation) trained following Janner et al? These details are not present in Alg. 1, but are necessary to evaluate the gradient p_Mc in eqns (9) and (10). \n- It is also not made clear whether in (9) and (10) the feasibility functions and reward are made to condition on diffusion time i, as I would expect it should since only tau_i is available at i.\n- After algorithm 1 is complete, how is the final policy constructed for the experiments? Perhaps this is as simple as running eqn. (9) and (10) a final time, but this is not specified either. \n- After algorithm 1 completes, how are constraints chosen by the practitioner as the abstract says? How do the authors choose what constraints they apply when sampling their final evaluations? This is stated in the abstract but is not discussed in the paper at all. \n- How is constrained data collected? Is there an expert that already includes the constraint?\n\nMinor:\n- A few scattered grammar errors could be addressed" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose an exploratory inverse constraint learning algorithm for inferring a diverse set of feasible constraints from offline dataset." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024toward,\ntitle={Toward Exploratory Inverse Constraint Inference with Generative Diffusion Verifiers},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=0UvlnHgaii},\nnote={under review}\n}" }, "abstract": { "value": "An important prerequisite for safe control is aligning the policy with the underlying constraints in the environment. In many real-world applications, due to the difficulty of manually specifying these constraints, existing works have proposed recovering constraints from expert demonstrations by solving the Inverse Constraint Learning (ICL) problem. However, ICL is inherently ill-posed, as multiple constraints can equivalently explain the experts' preferences, making the optimal solutions not uniquely identifiable. In this work, instead of focusing solely on a single constraint, we propose the novel approach of Exploratory ICL (ExICL). The goal of ExICL is to recover a diverse set of feasible constraints, thereby providing practitioners the flexibility to select the most appropriate constraint based on the needs of practical deployment. To achieve this goal, we design a generative diffusion verifier, which guides the trajectory generation process using the probabilistic representation of an optimal constrained policy. By comparing these decisions with those made by expert agents, we can efficiently verify a candidate constraint. Driven by the verification feedback, ExICL implements an exploratory constraint update mechanism that strategically facilitates the diversity within the collection of feasible constraints. Our empirical results demonstrate that ExICL can seamlessly and reliably generalize across different tasks and environments." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Inverse Reinforcement Learning", "Generative Diffusion Model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/3292081507031f678c9861e04e62d4575dccb7a7.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Toward Exploratory Inverse Constraint Inference with Generative Diffusion Verifiers" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]