Add Batch c15e2a77-9225-424e-bbaf-363176aafcb8
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- aaar10assessingaispotentialtoassistresearch/86bc9287-7ab0-4f58-9e16-bf41de17edfb_content_list.json +3 -0
- aaar10assessingaispotentialtoassistresearch/86bc9287-7ab0-4f58-9e16-bf41de17edfb_model.json +3 -0
- aaar10assessingaispotentialtoassistresearch/86bc9287-7ab0-4f58-9e16-bf41de17edfb_origin.pdf +3 -0
- aaar10assessingaispotentialtoassistresearch/full.md +450 -0
- aaar10assessingaispotentialtoassistresearch/images.zip +3 -0
- aaar10assessingaispotentialtoassistresearch/layout.json +3 -0
- abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/be605c98-e986-4916-a1cf-5a2cf4d89930_content_list.json +3 -0
- abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/be605c98-e986-4916-a1cf-5a2cf4d89930_model.json +3 -0
- abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/be605c98-e986-4916-a1cf-5a2cf4d89930_origin.pdf +3 -0
- abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/full.md +494 -0
- abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/images.zip +3 -0
- abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/layout.json +3 -0
- abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/89ad1898-ec6d-430c-a0d7-cdc1fb5659fa_content_list.json +3 -0
- abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/89ad1898-ec6d-430c-a0d7-cdc1fb5659fa_model.json +3 -0
- abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/89ad1898-ec6d-430c-a0d7-cdc1fb5659fa_origin.pdf +3 -0
- abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/full.md +0 -0
- abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/images.zip +3 -0
- abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/layout.json +3 -0
- abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/474f3ca7-b597-40ce-a816-f5a449c555ce_content_list.json +3 -0
- abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/474f3ca7-b597-40ce-a816-f5a449c555ce_model.json +3 -0
- abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/474f3ca7-b597-40ce-a816-f5a449c555ce_origin.pdf +3 -0
- abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/full.md +615 -0
- abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/images.zip +3 -0
- abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/layout.json +3 -0
- accelerateddiffusionmodelsviaspeculativesampling/69ab461c-709a-4477-81b9-473befc6be24_content_list.json +3 -0
- accelerateddiffusionmodelsviaspeculativesampling/69ab461c-709a-4477-81b9-473befc6be24_model.json +3 -0
- accelerateddiffusionmodelsviaspeculativesampling/69ab461c-709a-4477-81b9-473befc6be24_origin.pdf +3 -0
- accelerateddiffusionmodelsviaspeculativesampling/full.md +0 -0
- accelerateddiffusionmodelsviaspeculativesampling/images.zip +3 -0
- accelerateddiffusionmodelsviaspeculativesampling/layout.json +3 -0
- acceleratinglargelanguagemodelreasoningviaspeculativesearch/421c4b62-cf3e-458b-b763-7a580b21e488_content_list.json +3 -0
- acceleratinglargelanguagemodelreasoningviaspeculativesearch/421c4b62-cf3e-458b-b763-7a580b21e488_model.json +3 -0
- acceleratinglargelanguagemodelreasoningviaspeculativesearch/421c4b62-cf3e-458b-b763-7a580b21e488_origin.pdf +3 -0
- acceleratinglargelanguagemodelreasoningviaspeculativesearch/full.md +0 -0
- acceleratinglargelanguagemodelreasoningviaspeculativesearch/images.zip +3 -0
- acceleratinglargelanguagemodelreasoningviaspeculativesearch/layout.json +3 -0
- acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/6e68aa2b-d20f-464e-8322-2e96f58dc240_content_list.json +3 -0
- acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/6e68aa2b-d20f-464e-8322-2e96f58dc240_model.json +3 -0
- acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/6e68aa2b-d20f-464e-8322-2e96f58dc240_origin.pdf +3 -0
- acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/full.md +0 -0
- acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/images.zip +3 -0
- acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/layout.json +3 -0
- addqadaptivedistributionaldoubleqlearning/42431bfd-4f0e-441e-9c8a-947af21cd543_content_list.json +3 -0
- addqadaptivedistributionaldoubleqlearning/42431bfd-4f0e-441e-9c8a-947af21cd543_model.json +3 -0
- addqadaptivedistributionaldoubleqlearning/42431bfd-4f0e-441e-9c8a-947af21cd543_origin.pdf +3 -0
- addqadaptivedistributionaldoubleqlearning/full.md +0 -0
- addqadaptivedistributionaldoubleqlearning/images.zip +3 -0
- addqadaptivedistributionaldoubleqlearning/layout.json +3 -0
- adhmraligningdiffusionbasedhumanmeshrecoveryviadirectpreferenceoptimization/a763c3ea-e1d0-42c6-96b4-e51045315b61_content_list.json +3 -0
- adhmraligningdiffusionbasedhumanmeshrecoveryviadirectpreferenceoptimization/a763c3ea-e1d0-42c6-96b4-e51045315b61_model.json +3 -0
aaar10assessingaispotentialtoassistresearch/86bc9287-7ab0-4f58-9e16-bf41de17edfb_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:18f0a2c4f7b7e42d838c3bee9d4889ed0b18a9c217e8cfdebdcf6648dd30a6ae
|
| 3 |
+
size 137336
|
aaar10assessingaispotentialtoassistresearch/86bc9287-7ab0-4f58-9e16-bf41de17edfb_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fe575a6024c9bc3f5c3e8037b6e45efdc2005be9380ef42bd61926343daee62c
|
| 3 |
+
size 162123
|
aaar10assessingaispotentialtoassistresearch/86bc9287-7ab0-4f58-9e16-bf41de17edfb_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d91cc77a237ccddc1ddae9b9adbb09fb51daee7e46e45e40aa864c873ad87f37
|
| 3 |
+
size 3477634
|
aaar10assessingaispotentialtoassistresearch/full.md
ADDED
|
@@ -0,0 +1,450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Renze Lou<sup>1</sup> Hanzi Xu<sup>2</sup> Sijia Wang<sup>3</sup> Jiangshu Du<sup>4</sup> Ryo Kamoi<sup>1</sup> Xiaoxin Lu<sup>1</sup> Jian Xie<sup>5</sup> Yuxuan Sun<sup>5</sup> Yusen Zhang<sup>1</sup> Jihyun Janice Ahn<sup>1</sup> Hongchao Fang<sup>1</sup> Zhuoyang Zou<sup>1</sup> Wenchao Ma<sup>1</sup> Xi Li<sup>6</sup> Kai Zhang<sup>7</sup> Congying Xia<sup>5</sup> Lifu Huang<sup>3</sup> Wenpeng Yin<sup>1</sup>
|
| 2 |
+
|
| 3 |
+
# Abstract
|
| 4 |
+
|
| 5 |
+
Numerous studies have assessed the proficiency of AI systems, particularly large language models (LLMs), in facilitating everyday tasks such as email writing, question answering, and creative content generation. However, researchers face unique challenges and opportunities in leveraging LLMs for their own work, such as brainstorming research ideas, designing experiments, and writing or reviewing papers. In this study, we introduce AAAR-1.0, a benchmark dataset designed to evaluate LLM performance in three fundamental, expertise-intensive research tasks: (i) EQUATIONINFERENCE, assessing the correctness of equations based on the contextual information in paper submissions; (ii) EXPERIMENTDESIGN, designing experiments to validate research ideas and solutions; and (iii) PAPERWEAKNESS, identifying weaknesses in paper submissions. AAAR-1.0 differs from prior benchmarks in two key ways: first, it is explicitly research-oriented, with tasks requiring deep domain expertise; second, it is researcher-oriented, mirroring the primary activities that researchers engage in on a daily basis. An evaluation of both open-source and closed-source LLMs reveals their potential as well as limitations in conducting sophisticated research tasks. We will keep iterating AAAR-1.0 to new versions. Project Webpage: https://renzelou.github.io/AAAR-1.0/
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
Task #2: Experiment Design
|
| 9 |
+
Figure 1: The input-output illustration of three tasks in the proposed AAAR-1.0 benchmark.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
Although AI has brought transformative changes to various aspects of life, its impact on researchers unfolds in a nuanced manner. On the one hand, AI assists in various research disciplines, such as Social Science (Neuman et al., 2023), Finance (Gu et al., 2024), Medicine (Rakhimov et al., 2022), GeoScience (Praskievicz, 2018), etc., significantly expediting academic processes. However, many of these applications are superficial, often limited to data-driven clustering or classification. On the flip side, the AI era poses challenges for researchers. Despite its ability to streamline some activities, researchers still face demanding, cognitively intensive tasks such as staying current through extensive paper reading, rapidly generating ideas in response to fast-paced advancements, conducting rigorous experiments to substantiate claims, and managing an increasing volume of peer reviews. Then a question looms: How effectively can AI assist researchers in tasks that are domain-specific, expertise-demanding, and reasoning-intensive?
|
| 14 |
+
|
| 15 |
+
Existing works proved the promising potential for using LLMs in assisting AI research. Si et al. (2024) conducted a large-scale human study and found that LLMs can gen
|
| 16 |
+
|
| 17 |
+
erate creative research ideas. Lu et al. (2024) proposed an autonomous agent to handle complicated research workflow and write a whole research paper. However, most of these works focus on addressing highly subjective problems that require a high degree of expertise, making evaluation laborious and hard to reproduce. This underscores the need for a comprehensive benchmark that rigorously assesses LLMs' capabilities in expertise-intensive research activities.
|
| 18 |
+
|
| 19 |
+
To this end, in this work, we introduce AAAR-1.0, a novel benchmark that aims to comprehensively assess the LLMs' capacity on expert-level research tasks. As illustrated in Figure 1, AAAR-1.0 decomposes three distinct expert-level AI research tasks from the researcher's daily activities, including i) EQUATIONINFERENCE, investigating whether the LLMs can infer the equation correctness based on the paper context; ii) EXPERIMENTDESIGN, validating LLMs' ability on designing reliable experiments for a research idea; and iii) PAPERWEAKNESS, testing the quality of weaknesses discovered by LLMs from paper drafts. To ensure data quality, senior AI researchers with extensive domain expertise perform data annotation for AAAR-1.0, followed by rigorous multi-round data examination and filtering. All three tasks require models to possess strong domain knowledge covering various cutting-edge research findings, as well as expert-level research experience, to the extent that even humans need substantial research accumulation to tackle the tasks we designed. Crucially, tasks here are singular, standalone challenges (with clear input and output expectations) rather than a complicated task chain (Li et al., 2024; Lu et al., 2024), providing a more transparent assessment of the model's intermediate output. Benefiting from the proposed automatic metrics, we conduct extensive experiments across numerous mainstream LLMs, where we find that:
|
| 20 |
+
|
| 21 |
+
- With a random guess baseline of $40\%$ $\mathrm{F}_1$ , the performance of most LLMs on EQINFER hovers just slightly above chance, with the top models reaching around $46\%$ . This highlights the difficulty of the task, despite its reliance primarily on local context reasoning.
|
| 22 |
+
- In EXPDESIGN, LLM-designed experiments are innovative and more diverse than those by humans; however, many are trivial, lack feasibility, and stray from the original research objectives.
|
| 23 |
+
- In PAPERWEAKNESS, LLM-identified weaknesses often lack depth and specificity, making them broadly applicable and less useful for providing feedback on paper drafts.
|
| 24 |
+
|
| 25 |
+
# 2. Related Work
|
| 26 |
+
|
| 27 |
+
LLMs for AI Research. With the rapid evolution of pretraining techniques, LLMs are found to be useful in assisting various research disciplines (Yu et al., 2024a; Labrak et al.,
|
| 28 |
+
|
| 29 |
+
2024), particularly in AI research, such as generating novel research ideas (Kumar et al., 2024; Yu et al., 2024b), reviewing research draft (Gao et al., 2024; Du et al., 2024; Liang et al., 2024; Zhu et al., 2025), and writing scientific papers (Chamoun et al., 2024; Lu et al., 2024; Weng et al., 2024). For example, Si et al. (2024) conducted a large-scale human investigation on LLM-generated research ideas and found that LLMs can generate novel ideas compared with humans while lacking feasibility. Du et al. (2024) found that while LLMs are effective at summarizing papers, they tend to overly trust the authors' claimed strengths and struggle to identify weaknesses specific to the paper. Furthermore, some works try to employ LLMs to solve more complicated research tasks that are composed of multiple steps (Li et al., 2024; Tang et al., 2023). Notably, Lu et al. (2024) proposed AI-SCIENTIST, an autonomous agent framework that can handle a series of challenging research tasks consecutively, including generating research ideas, coming up with the corresponding experiments along with the implementations, and then writing the final research paper — exactly how human conduct a whole research pipeline. However, there is still a lack of systematic evaluations and quantitative analyses on the LLMs' (intermediate) output of each single-step research task. Accordingly, our work focuses on building a benchmark consisting of individual research steps with clear input-output expectations, making it suitable for comprehensive LLM evaluation. Moreover, we emphasize that relying on LLMs to fully replace human effort might compromise academic integrity. While our benchmark primarily serves an educational purpose — LLMs assist junior researchers by providing imperfect but insightful ideas, rather than by governing the entire research process.
|
| 30 |
+
|
| 31 |
+
Benchmarks for AI Research Tasks. Existing "LLM assists research" benchmarks mainly focus on the implementation and execution part of the research pipeline (Lu et al., 2024; Chen et al., 2024a; Li et al., 2024; Chan et al., 2024). For instance, Huang et al. (2024) proposed MLAgentBench to test the LLMs' capacity for writing project code and training the ML models, where the evaluation metric is the test performance of the models trained by LLMs. However, real-world AI research activities are diverse and some of them are hard to assess for quality, such as generating research ideas, which requires intensive manual assessment (Si et al., 2024; Liang et al., 2024). Our work centers on tasks that emphasize a comprehensive mastery of the scientific research field and core elements of a researcher's daily workload, and we try to build curated task-specific metrics for every single task for a more efficient and accurate LLMs appraisal.
|
| 32 |
+
|
| 33 |
+
# 3. AAAR-1.0
|
| 34 |
+
|
| 35 |
+
Figure 2 provides a data construction overview. In the following sections, we elaborate on the data collection de
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
Figure 2: Data construction workflows of the three tasks in AAAR-1.0.
|
| 39 |
+
|
| 40 |
+
tails, including § 3.1 EQUATION INFERENCE (EQINFER), § 3.2 EXPERIMENT DESIGN (EXPDESIGN), and § 3.3 PAPER WEAKNESS (WEAKNESS).
|
| 41 |
+
|
| 42 |
+
# 3.1. EQUATIONINFERENCE
|
| 43 |
+
|
| 44 |
+
Crafting a correct scientific equation in paper writing or validating an equation in paper reviewing is challenging, as it requires a thorough understanding of an algorithm or the intricate relationships among numerous variables. Directly prompting LLMs to generate equations proves overly demanding. Therefore, this work formulates EQINFER (Figure 1) as a binary inference task. $^{1}$
|
| 45 |
+
|
| 46 |
+
$①$ Data crawling and cleaning. For the data source, we adopt the pre-compilation LaTeX code for two reasons: i) existing PDF parsing tools, such as PyMuPDF and PaperMage (Lo et al., 2023), can introduce considerable noise to the parsed equation text; ii) considering that most existing LLMs are capable of processing LaTeX code, using LaTeX source instead of parsed text can be more accurate and provide LLMs with richer information. Meanwhile, we only crawl those peer-reviewed papers accepted by top-tier conferences to avoid using low-quality human-written equations. Accordingly, we first obtain the accepted paper list from ACL Anthology, from year 2019 to 2023. Next, we search each paper on arXiv to crawl its LaTeX source (if it exists). Finally, we get a total of 1,762 papers' source LaTeX packages. We then clean the LaTeX sources by deleting all the comments and combining multiple cross-referred .tex files into a main file. Afterward, we use regex to randomly extract (at most) 3 equations' code snippets per paper, resulting in 3,877 human-written equations.
|
| 47 |
+
|
| 48 |
+
② LLM-based equation synthesis. As EQINFER assesses whether the LLMs can infer the correctness of an equation (i.e., binary classification), for each human-written positive equation, we have to craft counterpart negative equations. To this end, for each positive equation, we prompt GPT-4 to synthesize a negative equation based on the paper context. We repeat this prompt (with a high decoding temperature) until three different negative equations are synthesized.
|
| 49 |
+
|
| 50 |
+
$③$ LLM-based filtering. However, the LLM-synthetic equations can be context-unaligned, i.e., some synthesized equations contain notation that is never defined in the paper context, which becomes a superficial shortcut and too effortless for LLMs to identify. To improve data quality, we prompt GPT-4 to identify context-unaligned negative equations. We then eliminate the positive equation and its negative counterparts, where all three negative counterparts are unaligned. This filtering leads to a final of 1,449 positive equations and 4,347 negative equations (each positive equation has three negative counterparts, and at least one negative counterpart is "challenging").
|
| 51 |
+
|
| 52 |
+
$④$ Expert-based examination. Furthermore, it's also possible that synthesized negative equations are actually correct (i.e., false negative) — even if the negative and positive equations are written differently, the final compiled results might be the same. We then employ human experts to review the data further and filter out false negative equations, checking the classification instances for accuracy.
|
| 53 |
+
|
| 54 |
+
We asked 5 senior PhD students who are experienced in AI research to check all instances. We ask human experts to consider the following criteria for each positive equation and its negative counterparts (each pair): i) Are all equations
|
| 55 |
+
|
| 56 |
+
grammatically correct? ii) After compilation, are all negative equations different from the positive ones? We ask every human expert to use external LaTeX compilation tools (e.g., TeXlive), and identify the pairs that cannot meet the criteria. Each pair is examined by at least two experts, and we only keep pairs that all experts decide to keep. After this strict examination, a total of 1,049 pairs are eventually kept (27.6% of pairs are filtered).
|
| 57 |
+
|
| 58 |
+
Final data. We finally obtain 1,049 positive equations (each has three negative counterparts). We show data statistics of EQINFER in Table 7 and data examples in Figure 8.
|
| 59 |
+
|
| 60 |
+
# 3.2. EXPERIMENTDESIGN
|
| 61 |
+
|
| 62 |
+
Given a research topic, such as a novel ML algorithm, a qualified researcher can design a solid experiment plan for it, and clarify underlying motivation to ensure the reliability of the designed experiment. Unlike the concurrent works that focus on the experiment implementation (Lu et al., 2024; Huang et al., 2024), we emphasize the importance of assessing the high-level experiment design of LLMs before the subsequent implementation to avoid any expensive execution iteration. Therefore, as shown in Figure 1, we formulate EXPDESIGN as a text-generation task that takes pre-experiment paper context as input, and then generates the experiment and explanation list.
|
| 63 |
+
|
| 64 |
+
$①$ Data crawling. As for the data source, we first collect $\geq 10\mathrm{k}$ papers' data from arXiv, including LaTeX sources and PDFs, which cover broad AI categories, including cs.AI, cs.CL, and cs.CV, from year 2018 to 2023. Similarly, to ensure the source data quality, we only use papers that have appeared at well-known conferences.
|
| 65 |
+
|
| 66 |
+
$②$ Domain-expert annotation. Making a reliable and executable experiment plan requires solid foundation knowledge of a specific research area. Consequently, we set a high standard for choosing annotators: i) be a senior Ph.D. student with at least one peer-reviewed publication in leading AI venues; ii) have more than 4 years of AI research experience; iii) frequently serve as conference reviewers. Finally, we invite a total of 10 qualified experts to participate in our data collection procedure. Given the $10\mathrm{k}$ crawled papers, we first ask every annotator to bid on the papers that they are interested in. After bidding, each of them is assigned 10 papers, i.e., a total of 100 papers to be annotated. During annotation, we post each paper PDF on online Google Drive and ask the annotator to first carefully read the whole paper. Then, we ask them to identify and locate the key experiments in each paper (i.e., highlighting the relevant paragraphs of each experiment). We don't consider some trivial experiments, such as those supplemental analyses in the appendix section. For each identified experiment, the
|
| 67 |
+
|
| 68 |
+
annotator has to concisely answer two questions: i) What did this experiment do? ii) Why did the paper authors conduct this experiment? In other words, we ask the annotator to summarize all the key experiments in this paper and explain the underlying motivations based on their rich domain experience.
|
| 69 |
+
|
| 70 |
+
$③$ Multi-round peer discussion. Intuitively, different experts might have different opinions on the same research topic. Particularly, when explaining the underlying motivation of an experiment, adopting only a single expert's opinion might introduce bias to our annotation. Hence, we conduct a further multi-round peer discussion. For each paper, where all the key experiments are identified, summarized, and explained, we ask a different expert (reviewer) to review the annotation by considering the following three criteria: i) Are the identified experiments all the key experiments? ii) Does each experiment summarization covers all key information? iii) Does each explanation sound reasonable and reliable? Each reviewer must leave comments on the online PDF regarding the above criteria, and then the annotator must respond to each comment — either accept the suggestion and revise the previous annotation or provide a "rebuttal" to the reviewer to uphold the annotation. This discussion is iterative until both opinions align. Eventually, for each paper, we collect two lists: i) the experiment list, summarizing each experiment step of the paper; ii) the explanation list, the underlying motivations that are one-one corresponding to the experiment.
|
| 71 |
+
|
| 72 |
+
Final data. After annotation, we use the pre-experiment context of each paper (according to the first-experiment location identified by the annotator) as the input. Furthermore, we use GPT-4 to delete any sentence that potentially leaks the experiment from the input.<sup>3</sup> Similar to EQINFER, we utilize the source LaTeX as the input text to avoid PDF parsing noise. As for the image input, we collect those figures within each paper's source LaTeX package and only keep figures that are used in the pre-experiment context. Overall, a total of 100 instances are collected. As shown in Figure 1, the input of each instance is the pre-experiment context (including the figures), and the ground-truth output is the expert-annotated experiment plan and the explanations. Table 8 shows data statistics and Figure 9 illustrates the sample case in EXPDESIGN.
|
| 73 |
+
|
| 74 |
+
# 3.3. PAPERWEAKNESS
|
| 75 |
+
|
| 76 |
+
Another critical research task is paper review. Previous works have demonstrated the usefulness of the LLM-based review feedback (Gao et al., 2024; Jin et al., 2024; Lu et al., 2024). However, as indicated by Du et al. (2024); Liang et al. (2024), LLMs only excel at summarizing the research
|
| 77 |
+
|
| 78 |
+
strengths while falling significantly short on weakness criticism. Hence, we build WEAKNESS for particularly investigating the LLM-generated weaknesses.
|
| 79 |
+
|
| 80 |
+
$①$ Data crawling. We first crawl a total of 3,779 anonymous submissions of ICLR 2023 from OpenReview, $^{4}$ including PDF and other meta information (e.g., scores, decisions, and tracks). As the ICLR 2023 has 13 distinct tracks while the paper distribution across different tracks is highly biased, we then uniformly sample papers from different research tracks to improve the domain diversity. Meanwhile, during sampling, we also keep the accept/reject papers distributed equally to avoid data bias. In a word, we finally collect a total of 1,000 papers (500 accepted; 500 rejected), uniformly covering all 13 tracks. Please refer to Figure 3 for the track and score distribution of the 1,000 papers.
|
| 81 |
+
|
| 82 |
+
$②$ Extraction of human-written weaknesses. Since the raw comments crawled from $ICLR 2023$ are mixed with both strengths and weaknesses, we further employ GPT-4 to extract all the weaknesses from each reviewer's comments and compose multiple weaknesses into a list. Notably, we force GPT-4 to keep the original text of the reviewer, i.e., all weaknesses in our dataset are those original sentences written by the reviewer without any modifications. What's more, sometimes one reviewer might repeatedly mention the same weakness throughout the comment. In this case, we simply keep all the repeated weaknesses because, if one weakness is repeatedly mentioned by the reviewer, it's intuitively an important weakness that the reviewer wants to emphasise; accordingly, keeping the repeat items can penalize LLMs more on missing this weakness.
|
| 83 |
+
|
| 84 |
+
For each paper, we can finally get multiple weakness lists (one weakness list per reviewer, one paper can have multiple reviewers). We further delete a few papers without any weaknesses found in the raw comments, resulting in a total of 993 instances, i.e., 993 {paper, weakness lists} pairs.
|
| 85 |
+
|
| 86 |
+
$③$ Input data processing. As we mentioned before, we crawl papers from OpenReview instead of arXiv because the under-review paper draft is required for this task. However, not every paper from OpenReview can be found on arXiv, i.e., the source LaTeX code and figures of most under-review papers are unavailable. Therefore, we utilize VILA (Lin et al., 2023) to parse text data out from the PDF; we also employ PDFFigures-2.0 (Clark & Divvala, 2016) to extract all the figures and tables (in image) from the paper, as Vila is not good at processing the table data.
|
| 87 |
+
|
| 88 |
+
<sup>4</sup>We adopt ICLR because it releases full submissions, while some other conferences only release accepted papers.
|
| 89 |
+
|
| 90 |
+
5We manually checked GPT-4's extraction results of 200 cases — GPT-4 only missed $\leq 1\%$ of reviewer-written weaknesses and maintained almost all the original text.
|
| 91 |
+
|
| 92 |
+
Final data. Our final data is composed of 993 instances, each input is paper text along with figure/table images, and each output is peer reviewers' weakness lists. Table 9 shows data statistics; Figure 10 presents an example of the data instances. We show the data diversity (score and track distribution) in Figure 3.
|
| 93 |
+
|
| 94 |
+
# 4. Evaluation Criteria
|
| 95 |
+
|
| 96 |
+
For EQINFER, we adopt $\mathrm{F}_1$ as the classification criterion. For EXPDESIGN and WEAKNESS, since both tasks have free-form outputs, we develop several novel task-specific metrics in addition to the conventional ROUGE (Lin, 2004).
|
| 97 |
+
|
| 98 |
+
We use LLMs to evaluate the experiment list of EXPDESIGN. Specifically, given a model-predicted experiment list $p$ , and the ground-truth list $g$ , we calculate:
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\text{En-Precision} = \frac{1}{m} \sum_{i=1}^{m} f\left(p_{i}, g\right) \tag{1}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
\text{En-Recall} = \frac{1}{n} \sum_{j=1}^{n} f\left(g_{j}, p\right) \tag{2}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
where the $m$ and $n$ are the list length of $p$ and $g$ ; $f(.)$ represents the LLM prompting, where we prompt LLM to decide whether each predicted experiment item $(p_i)$ is entailed by the whole ground-truth list $(g)$ , proceeding with binary output, and vice versa. Intuitively, En-Precision reflects how many prediction experiments match ground-truth experiments. In this work, we used GPT-4o as an evaluator.
|
| 109 |
+
|
| 110 |
+
While for the explanation generation of EXPDESIGN, as the prediction experiments are one-on-one corresponding to the ground truth, we adopt a semantic-based metric:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\text{S-Match} = \frac{1}{m} \sum_{i=1}^{m} \operatorname{sim}\left(p_{i}, g_{i}\right) \tag{3}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where we use SentenceBERT (Reimers, 2019) to measure the semantic similarity between $p_i$ and $g_i$ .
|
| 117 |
+
|
| 118 |
+
Unlike EXPDESIGN, the ground truth of WEAKNESS is multiple reviewers' weakness lists. Instead of merely merging the opinions of various reviewers into one flattened list and keeping LLM-as-judge as the metric (which is not only costly but also loses the structural information of diverse research perspectives), we employ the following semantic-based metric to efficiently evaluate predicted weaknesses:
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
\text{S-Precision} = \frac{1}{m} \sum_{i=1}^{m} \left(\frac{1}{r} \sum_{k=1}^{r} \max_{j} \operatorname{sim}\left(p_{i}, g_{j}^{k}\right)\right) \tag{4}
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
$$
|
| 125 |
+
\text{S-Recall} = \frac{1}{r} \sum_{k=1}^{r} \left(\frac{1}{n_{k}} \sum_{j=1}^{n_{k}} \max_{i} \operatorname{sim}\left(g_{j}^{k}, p_{i}\right)\right) \tag{5}
|
| 126 |
+
$$
|
| 127 |
+
|
| 128 |
+
where $r$ is the number of reviewers of the given paper, $n_k$ means the length of $k$ -th reviewer's weakness list, and $g_j^k$
|
| 129 |
+
|
| 130 |
+
indicates the $j$ -th item in $k$ -th reviewer's weakness list.
|
| 131 |
+
|
| 132 |
+
Additionally, in the real world, we would think a review weakness is reliable if it is specific to a paper. Meanwhile, we also hope the review is informative, i.e., no excessive similar weaknesses in one review. Inspired by the classic TF-IDF, we propose a novel review diversity metric:
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\text{ITF-IDF} = \frac{1}{w} \sum_{j=1}^{w} \left(\frac{1}{m_{j}} \sum_{i=1}^{m_{j}} \log\left(\frac{m_{j}}{O_{i}^{j}}\right) \times \log\left(\frac{w}{R_{i}^{j}}\right)\right) \tag{6}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
O_{i}^{j} = \sum_{k=1}^{m_{j}} \operatorname{sim}\left(p_{i}^{j}, p_{k}^{j}\right) \tag{7}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
R_{i}^{j} = \sum_{l=1}^{w} \max_{s} \operatorname{sim}\left(p_{i}^{j}, p_{s}^{l}\right) \tag{8}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
where $w$ is the total number of papers in the dataset, $p^j$ is the $j$ -th paper's prediction weakness list, and $p_i^j$ is the $i$ -th weakness in $p^j$ . Moreover, $O_i^j$ calculates the intra-paper occurrence frequency of $p_i^j$ ; $R_i^j$ is the "soft" number of papers that also contain $p_i^j$ , which is computed by summing the maximum similarity scores between $p_i^j$ and other papers' weaknesses. In a word, $O_i^j$ measures informativeness, and $R_i^j$ measures specificity. The complete ITF-IDF considers both aspects and reflects the overall weakness diversity.
|
| 147 |
+
|
| 148 |
+
# 5. Experiments and Analyses
|
| 149 |
+
|
| 150 |
+
In this section, we conduct extensive experiments on AAAR-1.0, across various mainstream LLMs, to quantify the current LLMs' capacity to tackle high-level research tasks. Specifically, § 5.1 for EQINFER, § 5.2 for EXPDESIGN, and § 5.3 for WEAKNESS. Please refer to Appendix B.2 for running details of the LLMs.
|
| 151 |
+
|
| 152 |
+
# 5.1. EQUATIONINFERENCE
|
| 153 |
+
|
| 154 |
+
Settings. As different LLMs have distinct context windows, to ensure a fair comparison, we fix the maximum input length for all models. According to Table 7, we empirically use 1,000 words for both contexts before and after equations, i.e., 2,000 surrounding words.
|
| 155 |
+
|
| 156 |
+
Main results. Table 1 shows the main results. Firstly, a simple baseline that predicts all equations as positive achieves $40\%$ $\mathrm{F_1}$ (due to the 1:3 of positive and negative equations), while nearly all open-source LLMs even cannot beat this naive baseline. Notably, though the performance of Mixtral is slightly superior to the baseline, the extremely biased precision and recall scores imply that Mixtral is also simply predicting almost all samples as positive instead of truly inferring. Meanwhile, compared to the All-Positive baseline, the performance superiority of the strong closed-source LLMs is not significant, the best LLM on this task only obtains $47.98\%$ , which demonstrates the challenge of EQINFER compared with other similar benchmarks (Song
|
| 157 |
+
|
| 158 |
+
Table 1: Various LLMs' performances on EQINFER task (1,049 positive and 3,147 negative samples). "All-positive" indicates a baseline that predicts all equations as positive.
|
| 159 |
+
|
| 160 |
+
<table><tr><td>Methods</td><td>F1</td><td>Prec.</td><td>Rec.</td></tr><tr><td>All-Positive</td><td>40.00</td><td>25.00</td><td>100.00</td></tr><tr><td colspan="4">Open-source LLMs</td></tr><tr><td>OLMo-7B (Groeneveld et al., 2024)</td><td>13.64</td><td>11.93</td><td>15.91</td></tr><tr><td>Mistral-7B (Jiang et al., 2023)</td><td>28.45</td><td>19.28</td><td>54.24</td></tr><tr><td>Mixtral-8x22B-MoE (Jiang et al., 2024)</td><td>40.90</td><td>26.15</td><td>93.80</td></tr><tr><td>Qwen 2.5-72B (Qwen Team, 2024)</td><td>31.22</td><td>26.28</td><td>57.40</td></tr><tr><td>Llama 3.1-70B (MetaAI, 2024)</td><td>33.08</td><td>22.14</td><td>65.39</td></tr><tr><td colspan="4">Closed-source LLMs</td></tr><tr><td>Gemini 1.5 Pro (Anil et al., 2023)</td><td>46.74</td><td>32.05</td><td>86.27</td></tr><tr><td>Claude 3.5 sonnet (Anthropic, 2024)</td><td>45.13</td><td>29.48</td><td>96.18</td></tr><tr><td>GPT-4o (OpenAI, 2024a)</td><td>40.35</td><td>30.79</td><td>58.53</td></tr><tr><td>o1-preview (OpenAI, 2024b)</td><td>46.35</td><td>31.43</td><td>88.27</td></tr><tr><td>o3-mini (OpenAI, 2025)</td><td>47.98</td><td>34.34</td><td>79.59</td></tr></table>
|
| 161 |
+
|
| 162 |
+
et al., 2023). The generally high recall with low precision of all LLMs also indicates real-world risks, e.g., relying on LLMs to check the validity of equations in paper review.
|
| 163 |
+
|
| 164 |
+
$\mathcal{Q}$ : Do more contexts boost performance? EQINFER places high demands on reasoning within the scientific context. To quantify the impact of input context length, we scale the input length (per side) from 100 to 1,500 words. As shown in Figure 4, for the open-source LLMs (Llama and Qwen), an appropriate context length can boost the performance; while for GPT-4o, scaling up the context length doesn't contribute much to the $\mathrm{F}_1$ . However, during the scaling, we find that the precision of GPT-4o is gradually increased, and the recall is decreased accordingly; considering the label distribution of EQINFER, we believe precision can better reflect the model's true capacities on this task. Thus, we anticipate that scaling up context shall be beneficial to those strong closed-source LLMs such as GPT-4o.
|
| 165 |
+
|
| 166 |
+
# 5.2. EXPERIMENTDESIGN
|
| 167 |
+
|
| 168 |
+
Settings. Similarly, we unify the input context length of different LLMs to ensure a fair comparison. According to Table 8, we set 2,000 and 3,000 input words for open- and closed-source LLMs, respectively. Meanwhile, as experiment explanation is the subsequent task of experiment design, using model-generated experiments can propagate errors in explanation, leading to inferior results for most LLMs. To this end, we provide LLMs with the oracle experiments when generating explanations.
|
| 169 |
+
|
| 170 |
+
Main results. Table 2 shows the main results. For the experiment design, the closed-source LLMs generally outperform open-source LLMs. However, the score values of all LLMs are relatively low $(20\% \sim 30\%)$ , implying the LLMs consistently miss ground-truth experiments from the original paper (low recall), and they tend to generate
|
| 171 |
+
|
| 172 |
+
Table 2: Various LLMs' performances on the 100 instances of EXPDESIGN. The explanation generation is based on the oracle experiments to prevent error propagation. "Copy Input" directly copies each experiment idea as the explanation.
|
| 173 |
+
|
| 174 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="3">Experiment Design</td><td colspan="3">Experiment Explanation</td></tr><tr><td>En-F1</td><td>En-Precision</td><td>En-Recall</td><td>S-Match</td><td>ROUGE-L</td><td>ROUGE-1</td></tr><tr><td>Copy Input</td><td>—</td><td>—</td><td>—</td><td>40.32</td><td>22.06</td><td>25.28</td></tr><tr><td colspan="7">Open-source LLMs</td></tr><tr><td>OLMo-7B (Groeneveld et al., 2024)</td><td>14.80</td><td>17.50</td><td>19.80</td><td>45.78</td><td>26.30</td><td>30.38</td></tr><tr><td>Mistral-7B (Jiang et al., 2023)</td><td>18.96</td><td>24.83</td><td>21.38</td><td>50.18</td><td>30.20</td><td>34.69</td></tr><tr><td>Mixtral-8x22B-MoE (Jiang et al., 2024)</td><td>23.16</td><td>24.45</td><td>30.57</td><td>49.07</td><td>29.96</td><td>34.53</td></tr><tr><td>Llama 3.1-70B (MetaAI, 2024)</td><td>22.92</td><td>23.10</td><td>29.76</td><td>50.05</td><td>29.33</td><td>34.11</td></tr><tr><td>Qwen 2.5-72B (Qwen Team, 2024)</td><td>24.28</td><td>22.48</td><td>34.44</td><td>51.12</td><td>29.46</td><td>34.68</td></tr><tr><td colspan="7">Closed-source LLMs</td></tr><tr><td>Gemini 1.5 Pro (Anil et al., 2023)</td><td>27.25</td><td>28.66</td><td>34.92</td><td>52.87</td><td>28.52</td><td>33.80</td></tr><tr><td>Claude 3.5 sonnet (Anthropic, 2024)</td><td>27.99</td><td>24.48</td><td>42.09</td><td>53.03</td><td>18.75</td><td>26.15</td></tr><tr><td>GPT-4o (OpenAI, 2024a)</td><td>25.03</td><td>22.25</td><td>36.59</td><td>54.79</td><td>27.54</td><td>34.31</td></tr><tr><td>o1-preview (OpenAI, 2024b)</td><td>30.13</td><td>28.13</td><td>38.59</td><td>58.55</td><td>29.11</td><td>36.70</td></tr><tr><td>o3-mini (OpenAI, 2025)</td><td>30.17</td><td>28.70</td><td>37.67</td><td>54.01</td><td>20.71</td><td>29.14</td></tr></table>
|
| 175 |
+
|
| 176 |
+
more novel experiments that did not appear in the original paper (low precision). As for the experiment explanation, the S-Match scores of closed-source LLMs still surpass the open-source LLMs. Furthermore, there is a negative correlation between S-Match and ROUGE score, where the ROUGE scores of closed-source LLMs are broadly inferior. We find that the open-source LLMs often try to copy the terms or phrases from the given experiment, or even simply paraphrase the experiment instead of explaining, which results in a high superficial overlap with the ground-truth explanation. This observation highlights the importance of adopting the proposed S-Match to avoid evaluation bias of traditional generation metrics.
|
| 177 |
+
|
| 178 |
+
$\mathcal{Q}_1$ : What is the quality of the model-generated novel experiments? The low En-Precision of LLMs in Table 2 indicates the creativity of LLMs in generating novel experiments. We then randomly sample 15 papers from the EXPDESIGN and ask 3 experts to manually review the model-generated novel experiments. Specifically, we ask the experts to judge the necessity of the novel experiments, where we set three necessity levels: "A" indicates the experiment is necessary/mandatory to support the main claim, "B" represents optional/supplementary experiments, and "C" for those unrelated experiments (see Appendix C.2 for evaluation details). Table 3 shows the necessity scores of the three strongest LLMs. We find that LLMs consistently generate a lot of novel experiments, especially the Claude; though most of them are optional, even fancy/unrelated experiments, there are still a considerable amount of necessary experiments generated, e.g., the results of o1. We further find that some novel experiments can be regarded as useful supplementary analyses w.r.t. the human experiments. Table 11 shows examples of model-suggested experiments.
|
| 179 |
+
|
| 180 |
+
Table 3: The human evaluation results on the novel experiments suggested by LLMs. "A", "B", and "C" represent the different quality levels (i.e., necessity); "A" is the best level.
|
| 181 |
+
|
| 182 |
+
<table><tr><td rowspan="2">Models</td><td rowspan="2"># of novel EXP</td><td colspan="2">Necessity (%)</td></tr><tr><td>A</td><td>B</td></tr><tr><td>Gemini 1.5 Pro</td><td>59</td><td>30.59</td><td>45.76</td></tr><tr><td>Claude 3.5 sonnet</td><td>112</td><td>21.78</td><td>50.00</td></tr><tr><td>o1-preview</td><td>71</td><td>35.84</td><td>36.61</td></tr></table>
|
| 183 |
+
|
| 184 |
+
Table 4: The impact on S-Match scores of maintaining the experiment's self-containment for EXPDESIGN.
|
| 185 |
+
|
| 186 |
+
<table><tr><td>Models</td><td>One-by-One</td><td>Whole-List</td></tr><tr><td>Llama 3.1-70B</td><td>50.05</td><td>49.36 (↓ 0.7)</td></tr><tr><td>Qwen 2.5-72B</td><td>51.12</td><td>48.56 (↓ 2.6)</td></tr><tr><td>Gemini 1.5 Pro</td><td>52.87</td><td>57.48 (↑ 4.6)</td></tr><tr><td>Claude 3.5 sonnet</td><td>53.03</td><td>59.11 (↑ 6.1)</td></tr><tr><td>GPT-4</td><td>55.03</td><td>56.95 (↑ 1.9)</td></tr><tr><td>GPT-4o</td><td>54.79</td><td>58.54 (↑ 3.8)</td></tr><tr><td>o1-preview</td><td>58.55</td><td>61.58 (↑ 3.0)</td></tr></table>
|
| 187 |
+
|
| 188 |
+
$\mathcal{Q}_2$ : Can self-contained experiment design enhance the experiment explanation? When generating the explanation in Table 2, we provide LLMs with each individual experiment and let them explain one by one, because we find that, when providing the whole experiment list, those open-source models only explain partial experiments because of their poor instruction-following capacity. However, there are intuitively some semantic or logical relations between different experiments, e.g., some experiments are prerequisite
|
| 189 |
+
|
| 190 |
+
Table 5: The human evaluation results on LLMs' output explanations of EXPDESIGN. "Acc. ratio" means how many model outputs are accepted by the annotator.
|
| 191 |
+
|
| 192 |
+
<table><tr><td>Models</td><td>Acc. ratio</td></tr><tr><td>Llama 3.1-70B</td><td>22.93</td></tr><tr><td>Gemini 1.5 Pro</td><td>55.07</td></tr><tr><td>Claude 3.5 sonnet</td><td>61.46</td></tr><tr><td>GPT-4o</td><td>69.72</td></tr><tr><td>o1-preview</td><td>76.14</td></tr></table>
|
| 193 |
+
|
| 194 |
+
to others. Therefore, this one-by-one prompting might break the self-containment of an experiment plan. Consequently, we test with the "whole-list" prompting, where the LLMs are given the complete experiment list and are asked to explain all experiment steps together.
|
| 195 |
+
|
| 196 |
+
As shown in Table 4, unlike the open-source LLMs, the explanation performances of those closed-source LLMs are generally improved after adopting whole-list prompting. According to further manual checking, after maintaining the self-containment of the experiments, the LLMs can refer to other experiments and better grasp the underlying motivation of the current experiment.
|
| 197 |
+
|
| 198 |
+
$\mathcal{Q}_3$ : Do human evaluation results align with automatic metrics for explanation? As the explanation can be open-ended, in this paragraph, we provide the human evaluation results on different LLMs' experiment explanation outputs. In detail, we randomly select 20 out of 100 papers and ask 5 annotators to read the experiments along with each model's explanations; we then let the annotator decide whether each model's explanation is acceptable (see Appendix C.3 for more details). Table 5 illustrates the results, where the score variance is higher than Table 2. However, the performance ranking of both tables is perfectly correlated with each other (Spearman's rank correlation coefficient $= 1$ ), demonstrating the effectiveness of S-Match.
|
| 199 |
+
|
| 200 |
+
$\mathcal{Q}_4$ : Do more contexts boost performance? We also investigate the impact of input context length for EXPDESIGN. As shown in Figure 5, we scale up the input pre-experiment context length from 0.1k to 10k tokens (10k is the length of the longest paper). For the experiment design, more input context does improve the performance of different LLMs, while this benefit stops after exceeding 8k tokens, which means that after the necessary information has been covered, scaling context becomes inefficient. Meanwhile, the explanation generation results reveal that LLMs primarily depend on given experiments rather than paper context to explain motivations. However, we do not expect this as we hope LLMs can explain the motivation based on a thorough
|
| 201 |
+
|
| 202 |
+
understanding of the paper, just like how human experts do. Hence, there is still a considerable gap between the LLMs and humans in terms of grasping research motivations.
|
| 203 |
+
|
| 204 |
+
$\mathcal{Q}_5$ : Does multi-modal input boost performance? Intuitively, besides the text, when designing experiments for a given research topic, the figures can provide rich supplementary information, such as an algorithm illustration that can help better understand this research topic and underlying motivations. Hence, we test the performance of different LMMs (Large Multimodal Models), including GPT-4o and InternVL2 (Chen et al., 2024b). Table 12 shows the ablation results on the figure data. To our surprise, the figure data doesn't improve the LMMs' results in this task, even harming the performances. This might be due to the low informativeness of the figures, as figures usually consume more input tokens but act only as supplementary information to the text, indicating future work on developing LMMs that can effectively leverage the scientific figures.
|
| 205 |
+
|
| 206 |
+
# 5.3. PAPERWEAKNESS
|
| 207 |
+
|
| 208 |
+
Settings. Intuitively, full paper content is necessary for paper reviewing. Therefore, instead of setting a maximum input length, in WEAKNESS, we try to utilize the whole paper. As the input length of WEAKNESS is extremely long (see Table 9), we adopt a "split-combine" method — we first split the whole paper into smaller pieces and let LLMs predict the weaknesses of each piece separately; after that, we merge all pieces' weaknesses as a final prediction. For the length of each small piece, we set 2,000 and 3,000 words for open- and closed-source LLMs, respectively. Additionally, in this task, we also examine the performance of AI-SCI (Lu et al., 2024), which enhances LLMs' paper review ability by leveraging advanced prompting techniques, e.g., self-reflection (Shinn et al., 2024) and response ensembling (Wang et al., 2023).<sup>6</sup>
|
| 209 |
+
|
| 210 |
+
Main results. Table 6 shows the main results, where the closed-source LLMs' overall performances are generally superior to the results of open-source LLMs. Similarly, closed-source LLMs are particularly excellent in S-Recall because of more generated weaknesses. However, there is still a considerable gap in the weakness diversity between the LLMs and human experts.<sup>7</sup> Compared with human review, most LLM-generated weaknesses are vague and lack the necessary knowledge about some frontier research works. Surprisingly, AI-SCI performs worse than backbone GPT
|
| 211 |
+
|
| 212 |
+
Table 6: Various LLMs' performances on the 993 instances of WEAKNESS.
|
| 213 |
+
|
| 214 |
+
<table><tr><td>Methods</td><td>S-F1 (%)</td><td>S-Precision (%)</td><td>S-Recall (%)</td><td>Weakness Diversity
|
| 215 |
+
ITF-IDF (↑)</td></tr><tr><td>Human Review</td><td>—</td><td>—</td><td>—</td><td>7.69</td></tr><tr><td colspan="5">Open-source LLMs</td></tr><tr><td>OLMo-7B (Groeneveld et al., 2024)</td><td>43.25</td><td>40.38</td><td>47.04</td><td>2.45</td></tr><tr><td>Mistral-7B (Jiang et al., 2023)</td><td>42.03</td><td>43.80</td><td>40.77</td><td>1.17</td></tr><tr><td>Mixtral-8x22B-MoE (Jiang et al., 2024)</td><td>43.23</td><td>44.59</td><td>42.23</td><td>0.98</td></tr><tr><td>Llama 3.1-70B (MetaAI, 2024)</td><td>42.78</td><td>43.19</td><td>42.70</td><td>2.60</td></tr><tr><td>Qwen 2.5-72B (Qwen Team, 2024)</td><td>42.74</td><td>43.80</td><td>42.05</td><td>1.21</td></tr><tr><td colspan="5">Closed-source LLMs</td></tr><tr><td>Gemini 1.5 Pro (Anil et al., 2023)</td><td>48.75</td><td>43.97</td><td>55.08</td><td>5.88</td></tr><tr><td>Claude 3.5 sonnet (Anthropic, 2024)</td><td>47.85</td><td>41.97</td><td>56.00</td><td>3.91</td></tr><tr><td>GPT-4o (OpenAI, 2024a)</td><td>47.73</td><td>42.09</td><td>55.48</td><td>5.95</td></tr><tr><td>o1-preview (OpenAI, 2024b)</td><td>48.62</td><td>42.54</td><td>57.08</td><td>5.63</td></tr><tr><td>o3-mini (OpenAI, 2025)</td><td>46.33</td><td>42.00</td><td>51.99</td><td>5.85</td></tr><tr><td colspan="5">LLM Agent Framework</td></tr><tr><td>AI-SCI (GPT-4o) (Lu et al., 2024)</td><td>45.05</td><td>40.02</td><td>51.91</td><td>2.23</td></tr></table>
|
| 216 |
+
|
| 217 |
+
4o, especially on ITF-IDF, which suggests the challenge of WEAKNESS, i.e., simply adopting popular prompting techniques cannot well address this task.
|
| 218 |
+
|
| 219 |
+
$Q_{1}$ : Is the split-combine effective? Ideally, if the LLM has a sufficient context window size, splitting the input papers for separate processing is unnecessary. Consequently, in this paragraph, we utilize the LLMs accepting long context input to compare "split-combine" with "no-split", i.e., letting LLMs write weaknesses by giving the full paper. In practice, we set the maximum number of input words to $20k$ , which ensures $\geq 95\%$ papers in the WEAKNESS can be fully processed. As shown in Table 10, compared with giving the full paper contexts, split-combine generally brings about superior performances. During manual checking, we find that, when full paper is available, LLMs frequently neglect some important sections and omit weaknesses accordingly, while split-combine ensures that the LLMs can carefully brainstorm weaknesses within each smaller piece. Surprisingly, the LLMs' performances with full paper context can be even worse than just retaining the first 3,000 words. This implies that even the current powerful long-context LLMs still fall short when processing long scientific documents.
|
| 220 |
+
|
| 221 |
+
$\mathcal{Q}_2$ : Does multi-modal input boost performance? Our dataset covers both tables and figure illustrations extracted from the paper PDF as inputs. Intuitively, when reviewing a paper, both figures and tables are critical, not only for a better understanding, but also because some weaknesses are related to tables/figures.<sup>8</sup> Therefore, in Table 13, we adopt
|
| 222 |
+
|
| 223 |
+
two LMMs to investigate the effectiveness of image inputs. Overall, image information, including both figures and tables, doesn't bring significant performance improvement, i.e., only InternVL2 gains a performance boost after incorporating figures; while tables slightly drop both models' results. This is probably because the LMMs cannot reason well over the information-intensive images, especially the table images.
|
| 224 |
+
|
| 225 |
+
# 6. Conclusion
|
| 226 |
+
|
| 227 |
+
In this work, we propose AAAR-1.0, a novel benchmark targeting a comprehensive evaluation of the current LLMs' capacity in assisting AI research. AAAR-1.0 consists of distinct expertise-intensive tasks along with the curated evaluation metrics. We collect high-quality data by employing senior AI researchers and conducting strict data examinations. Extensive experiments highlight the challenges and values of AAAR-1.0.
|
| 228 |
+
|
| 229 |
+
# Acknowledgments
|
| 230 |
+
|
| 231 |
+
The authors would like to thank Ibraheem Moosa and Sarkar Snigdha Sarathi Das for assisting in the data collection.
|
| 232 |
+
|
| 233 |
+
# Impact Statement
|
| 234 |
+
|
| 235 |
+
Our study explores whether LLMs can assist human researchers in AI research. We do not advocate for AI replacing human researchers. Instead, we stress that the primary responsibility for scientific research should remain with humans to prevent societal risks, with LLMs serving as tools to
|
| 236 |
+
|
| 237 |
+
enhance research efficiency. Specifically, our work analyzes the strengths and weaknesses of LLMs to ensure researchers remain judicious in their use of these tools. Our goal is to mitigate risks while maximizing the benefits offered by LLMs. We are committed to the careful distribution of data collected in our research, ensuring it is used solely for research purposes.
|
| 238 |
+
|
| 239 |
+
# References
|
| 240 |
+
|
| 241 |
+
Almazrouei, E., Alobeidli, H., Alshamsi, A., Cappelli, A., Cojocaru, R., Debbah, M., Goffinet, E., Heslow, D., Lau nay, J., Malartic, Q., Noune, B., Pannier, B., and Penedo, G. Falcon-40B: an open large language model with state-of-the-art performance, 2023.
|
| 242 |
+
Anil, R., Borgeaud, S., Wu, Y., Alayrac, J.-B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A. M., Hauth, A., Team, G., et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023.
|
| 243 |
+
Anthropic. Introducing claude 3.5 sonnet. https:// www.anthropic.com/news/claude-3-5-sonnet, June 2024.
|
| 244 |
+
Chamoun, E., Schlichktrull, M., and Vlachos, A. Automated focused feedback generation for scientific writing assistance. arXiv preprint arXiv:2405.20477, 2024.
|
| 245 |
+
Chan, J. S., Chowdhury, N., Jaffe, O., Aung, J., Sherburn, D., Mays, E., Starace, G., Liu, K., Maksin, L., Patwardhan, T., et al. Mle-bench: Evaluating machine learning agents on machine learning engineering. arXiv preprint arXiv:2410.07095, 2024.
|
| 246 |
+
Chen, Z., Chen, S., Ning, Y., Zhang, Q., Wang, B., Yu, B., Li, Y., Liao, Z., Wei, C., Lu, Z., et al. Scienceagentbench: Toward rigorous assessment of language agents for data-driven scientific discovery. arXiv preprint arXiv:2410.05080, 2024a.
|
| 247 |
+
Chen, Z., Wang, W., Tian, H., Ye, S., Gao, Z., Cui, E., Tong, W., Hu, K., Luo, J., Ma, Z., et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024b.
|
| 248 |
+
Clark, C. and Divvala, S. Pdfigures 2.0: Mining figures from research papers. In Proceedings of the 16th ACM/IEEE-CS on Joint Conference on Digital Libraries, pp. 143-152, 2016.
|
| 249 |
+
Du, J., Wang, Y., Zhao, W., Deng, Z., Liu, S., Lou, R., Zou, H. P., Venkit, P. N., Zhang, N., Srinath, M., Zhang, H. R., Gupta, V., Li, Y., Li, T., Wang, F., Liu, Q., Liu, T., Gao, P., Xia, C., Xing, C., Cheng, J., Wang, Z., Su, Y., Shah, R. S., Guo, R., Gu, J., Li, H., Wei, K., Wang,
|
| 250 |
+
|
| 251 |
+
Z., Cheng, L., Ranathunga, S., Fang, M., Fu, J., Liu, F., Huang, R., Blanco, E., Cao, Y., Zhang, R., Yu, P. S., and Yin, W. Llms assist NLP researchers: Critique paper (meta-)reviewing. In The 2024 Conference on Empirical Methods in Natural Language Processing, 2024. doi: 10.48550/ARXIV.2406.16253. URL https://doi.org/10.48550/arXiv.2406.16253.
|
| 252 |
+
Gao, Z., Brantley, K., and Joachims, T. Reviewer2: Optimizing review generation through prompt generation. arXiv preprint arXiv:2402.10886, 2024.
|
| 253 |
+
Groeneveld, D., Beltagy, I., Walsh, P., Bhagia, A., Kinney, R., Tafjord, O., Jha, A. H., Ivison, H., Magnusson, I., Wang, Y., Arora, S., Atkinson, D., Authur, R., Chandu, K., Cohan, A., Dumas, J., Elazar, Y., Gu, Y., Hessel, J., Khot, T., Merrill, W., Morrison, J., Muennighoff, N., Naik, A., Nam, C., Peters, M. E., Pyatkin, V., Ravichander, A., Schwenk, D., Shah, S., Smith, W., Subramani, N., Wortsman, M., Dasigi, P., Lambert, N., Richardson, K., Dodge, J., Lo, K., Soldaini, L., Smith, N. A., and Hajishirzi, H. Olmo: Accelerating the science of language models. Preprint, 2024.
|
| 254 |
+
Gu, J., Ye, J., Yin, W., and Wang, G. Adaptive and explainable margin trading via large language models on portfolio management. In Proceedings of the 5th ACM International Conference on AI in Finance (ICAIF'24), 2024.
|
| 255 |
+
Huang, Q., Vora, J., Liang, P., and Leskovec, J. Mlagent-bench: Evaluating language agents on machine learning experimentation. In *Forty-first International Conference on Machine Learning*, 2024.
|
| 256 |
+
Jiang, A. Q., Sablayrolles, A., Mensch, A., Bamford, C., Chaplot, D. S., Casas, D. d. l., Bressand, F., Lengyel, G., Lample, G., Saulnier, L., et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023.
|
| 257 |
+
Jiang, A. Q., Sablayrolles, A., Roux, A., Mensch, A., Savary, B., Bamford, C., Chaplot, D. S., Casas, D. d. l., Hanna, E. B., Bressand, F., et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024.
|
| 258 |
+
Jin, Y., Zhao, Q., Wang, Y., Chen, H., Zhu, K., Xiao, Y., and Wang, J. Agentreview: Exploring peer review dynamics with llm agents. arXiv preprint arXiv:2406.12708, 2024.
|
| 259 |
+
Kumar, S., Ghosal, T., Goyal, V., and Ekbal, A. Can large language models unlock novel scientific research ideas? arXiv preprint arXiv:2409.06185, 2024.
|
| 260 |
+
Labrak, Y., Bazoge, A., Morin, E., Gourraud, P.-A., Rouvier, M., and Dufour, R. Biomistral: A collection of open-source pretrained large language models for medical domains. arXiv preprint arXiv:2402.10373, 2024.
|
| 261 |
+
|
| 262 |
+
Li, H., Jiang, H., Zhang, T., Yu, Z., Yin, A., Cheng, H., Fu, S., Zhang, Y., and He, W. Traineragent: Customizable and efficient model training through llm-powered multi-agent system. arXiv preprint arXiv:2311.06622, 2023.
|
| 263 |
+
Li, R., Patel, T., Wang, Q., and Du, X. Mlr-copilot: Autonomous machine learning research based on large language models agents. arXiv preprint arXiv:2408.14033, 2024.
|
| 264 |
+
Liang, W., Zhang, Y., Cao, H., Wang, B., Ding, D. Y., Yang, X., Vodrahalli, K., He, S., Smith, D. S., Yin, Y., et al. Can large language models provide useful feedback on research papers? a large-scale empirical analysis. NEJM AI, 1(8):AIoa2400196, 2024.
|
| 265 |
+
Lin, C.-Y. Rouge: A Package for Automatic Evaluation of Summaries. In Text summarization branches out, pp. 74-81, 2004.
|
| 266 |
+
Lin, J., Yin, H., Ping, W., Lu, Y., Molchanov, P., Tao, A., Mao, H., Kautz, J., Shoeybi, M., and Han, S. Vila: On pre-training for visual language models, 2023.
|
| 267 |
+
Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics, 12:157-173, 2024.
|
| 268 |
+
Lo, K., Shen, Z., Newman, B., Chang, J. Z., Authur, R., Bransom, E., Candra, S., Chandrasekhar, Y., Huff, R., Kuehl, B., et al. Papermage: A unified toolkit for processing, representing, and manipulating visually-rich scientific documents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 495-507, 2023.
|
| 269 |
+
Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J., and Ha, D. The AI Scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292, 2024.
|
| 270 |
+
MetaAI. Introducing llama 3.1: Our most capable models to date. https://ai.meta.com/blog/meta-llama-3-1/, July 2024.
|
| 271 |
+
Neuman, Y., Cohen, Y., and Yin, W. Identifying social norm violation in movie plots: from borat to american pie. Digit. Scholarsh. Humanit., 38(4):1636-1645, 2023. doi: 10.1093/LLC/FQAD052. URL https://doi.org/10.1093/llc/fqad052.
|
| 272 |
+
OpenAI. Hello gpt-4o. https://openai.com/index/hello-gpt-4o/, May 2024a.
|
| 273 |
+
OpenAI. Introducing openai o1. https://openai.com/index/introducing-openai-o1-preview/, September 2024b.
|
| 274 |
+
|
| 275 |
+
OpenAI. Openai o3-mini. https://openai.com/index/openai-o3-mini/, January 2025.
|
| 276 |
+
Praskievicz, S. River classification as a geographic tool in the age of big data and global change. Geographical Review, 108(1):120-137, 2018.
|
| 277 |
+
Rakhimov, M., Akhmadjonov, R., and Javliev, S. Artificial intelligence in medicine for chronic disease classification using machine learning. In 2022 IEEE 16th International Conference on Application of Information and Communication Technologies (AICT), pp. 1-6. IEEE, 2022.
|
| 278 |
+
Reimers, N. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084, 2019.
|
| 279 |
+
Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K., and Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36, 2024.
|
| 280 |
+
Si, C., Yang, D., and Hashimoto, T. Can llms generate novel research ideas? a large-scale human study with 100+ nlp researchers. arXiv preprint arXiv:2409.04109, 2024.
|
| 281 |
+
Song, L., Zhang, J., Cheng, L., Zhou, P., Zhou, T., and Li, I. Nlpbench: Evaluating large language models on solving nlp problems. arXiv preprint arXiv:2309.15630, 2023.
|
| 282 |
+
Tang, X., Liu, Y., Cai, Z., Shao, Y., Lu, J., Zhang, Y., Deng, Z., Hu, H., An, K., Huang, R., et al. Ml-bench: Evaluating large language models and agents for machine learning tasks on repository-level code. arXiv e-prints, pp. arXiv-2311, 2023.
|
| 283 |
+
Team, G. Google launches gemma 2, its next generation of open models. https://blog.google/technology/developers/google-gemma-2/, Jun 2024a.
|
| 284 |
+
Team, Q. Qwen2.5: A party of foundation models, September 2024b. URL https://qwenlm.github.io/blog/qwen2.5/.
|
| 285 |
+
Wang, M., Chen, L., Fu, C., Liao, S., Zhang, X., Wu, B., Yu, H., Xu, N., Zhang, L., Luo, R., et al. Leave no document behind: Benchmarking long-context llms with extended multi-doc qa. arXiv preprint arXiv:2406.17419, 2024.
|
| 286 |
+
Wang, X., Wei, J., Schuurmans, D., Le, Q. V., Chi, E. H., Narang, S., Chowdhery, A., and Zhou, D. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/pdf?id=1PL1NIMMrw.
|
| 287 |
+
|
| 288 |
+
Weng, Y., Zhu, M., Bao, G., Zhang, H., Wang, J., Zhang, Y., and Yang, L. Cycleresearcher: Improving automated research via automated review. arXiv preprint arXiv:2411.00816, 2024.
|
| 289 |
+
Yu, B., Baker, F. N., Chen, Z., Ning, X., and Sun, H. Llasmol: Advancing large language models for chemistry with a large-scale, comprehensive, high-quality instruction tuning dataset. arXiv preprint arXiv:2402.09391, 2024a.
|
| 290 |
+
Yu, H., Hong, Z., Cheng, Z., Zhu, K., Xuan, K., Yao, J., Feng, T., and You, J. Researchtown: Simulator of human research community. arXiv preprint arXiv:2412.17767, 2024b.
|
| 291 |
+
Zhu, M., Weng, Y., Yang, L., and Zhang, Y. Deepreview: Improving llm-based paper review with human-like deep thinking process. arXiv preprint arXiv:2503.08569, 2025.
|
| 292 |
+
|
| 293 |
+
# Appendices
|
| 294 |
+
|
| 295 |
+
Within this supplementary material, we elaborate on the following aspects:
|
| 296 |
+
|
| 297 |
+
- Appendix A: Data Statistics and Diversity
|
| 298 |
+
- Appendix B: Implementation Details
|
| 299 |
+
- Appendix C: More Experiment Results and Details
|
| 300 |
+
- Appendix D: Data Cases and Annotation Platform Illustration
|
| 301 |
+
- Appendix E: Prompt Templates
|
| 302 |
+
|
| 303 |
+
# A. Data Statistics and Diversity
|
| 304 |
+
|
| 305 |
+
We provide the detailed data statistics of three datasets in our benchmark, as shown in Table 7, 8, and 9. We use the NLTK package to tokenize words and count the length. When calculating the length of equations, we use the pylatexenc tool to simplify the equations first.
|
| 306 |
+
|
| 307 |
+
Meanwhile, for the WEAKNESS, we also plot the review scores distribution of the papers used in the dataset, as well as the track distribution. As can be found in Figure 3, our dataset has a decent distribution, where the papers are uniformly distributed across 13 tracks, and most papers' scores ranged from 5 to 8 (i.e., most papers are weakly rejected or accepted).
|
| 308 |
+
|
| 309 |
+
Table 7: The statistics of EQINFER. Here, the "left" and "right" input context indicates the paper contexts before and after the missed equation; "pos." means the ground-truth equations (written by the source paper authors), while "neg." is the GPT4-synthetic wrong equations.
|
| 310 |
+
|
| 311 |
+
<table><tr><td># of positive equations</td><td>1,049</td></tr><tr><td># of negative equations</td><td>3,147</td></tr><tr><td># of source papers</td><td>869</td></tr><tr><td>ave. “left” input context length (in words)</td><td>4,377</td></tr><tr><td>ave. “right” input context length (in words)</td><td>6,362</td></tr><tr><td>max “left” input context length (in words)</td><td>24,849</td></tr><tr><td>max “right” input context length (in words)</td><td>32,948</td></tr><tr><td>min “left” input context length (in words)</td><td>711</td></tr><tr><td>min “right” input context length (in words)</td><td>8</td></tr><tr><td>ave. “pos.” output equation length (in character)</td><td>55</td></tr><tr><td>ave. “neg.” output equation length (in character)</td><td>48</td></tr><tr><td>max “pos.” output equation length (in character)</td><td>1,039</td></tr><tr><td>max “neg.” output equation length (in character)</td><td>306</td></tr><tr><td>min “pos.” output equation length (in character)</td><td>6</td></tr><tr><td>min “neg.” output equation length (in character)</td><td>4</td></tr></table>
|
| 312 |
+
|
| 313 |
+
# B. Implementation Details
|
| 314 |
+
|
| 315 |
+
# B.1. Metric Details
|
| 316 |
+
|
| 317 |
+
When calculating the metrics, specifically for the similarity-based scores, we utilize SentenceBERT (Reimers, 2019) to encode each segment (e.g., each experiment idea in the list) into a dense vector, and then calculate the cosine similarity, $^{11}$ which takes about 1GB of memory when running on a single A100 GPU.
|
| 318 |
+
|
| 319 |
+
Table 8: The statistics of EXPDESIGN.
|
| 320 |
+
|
| 321 |
+
<table><tr><td># of instances</td><td>100</td></tr><tr><td># of source papers</td><td>100</td></tr><tr><td>ave. input context length (in words)</td><td>4,288</td></tr><tr><td>max input context length (in words)</td><td>9,799</td></tr><tr><td>min input context length (in words)</td><td>698</td></tr><tr><td>ave. # of input figures</td><td>2.6</td></tr><tr><td>max # of input figures</td><td>16.0</td></tr><tr><td>min # of input figures</td><td>0.0</td></tr><tr><td>ave. length of Experiment&Explanation list</td><td>5.7</td></tr><tr><td>ave. length per experiment (in words)</td><td>34.3</td></tr><tr><td>ave. length per explanation (in words)</td><td>27.1</td></tr><tr><td>max length of Experiment&Explanation list</td><td>13</td></tr><tr><td>max length per experiment (in words)</td><td>135</td></tr><tr><td>max length per explanation (in words)</td><td>89</td></tr><tr><td>min length of Experiment&Explanation list</td><td>2</td></tr><tr><td>min length per experiment (in words)</td><td>9</td></tr><tr><td>min length per explanation (in words)</td><td>9</td></tr></table>
|
| 322 |
+
|
| 323 |
+
# B.2. LLMs Running Details
|
| 324 |
+
|
| 325 |
+
In our experiments, we utilize various LLMs, including both closed and open-sourced. We list the model weight sources for the open-source LLMs:
|
| 326 |
+
|
| 327 |
+
- OLMo-7B (Groeneveld et al., 2024): https://huggingface.co/allenai/OLMo-7B
|
| 328 |
+
- Falcon-40B (Almazrouei et al., 2023): https://huggingface.co/tiiuae/falcon-40b
|
| 329 |
+
- Gemma 2-27B (Gemma Team, 2024): https://huggingface.co/google/gemma-2-27b
|
| 330 |
+
- Mistral-7B (Jiang et al., 2023): https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3
|
| 331 |
+
- Mixtral-8x22B-MoE (Jiang et al., 2024): https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1
|
| 332 |
+
- Llama 3.1-70B (MetaAI, 2024): https://huggingface.co/meta-llama/Llama-3.1-70B
|
| 333 |
+
- Qwen 2.5-72B (Qwen Team, 2024): https://huggingface.co/Qwen/Qwen2.5-72B
|
| 334 |
+
|
| 335 |
+
We use VLLM to unify the inference endpoints of all the above models. $^{12}$ We use Pytorch 2.4.0 with CUDA 12.1, and use 8 NVIDIA A100 GPUs for the LLMs inference.
|
| 336 |
+
|
| 337 |
+
Meanwhile, we use the gpt-4o-2024-08-06, gpt-4-1106-preview, o1-preview-2024-09-12, gemini-1.5-pro-002, and claude-3-5-sonnet-20240620 for the closed-source LLMs. We use LiteLLM to unify the API calling for all these LLMs. $^{13}$
|
| 338 |
+
|
| 339 |
+
Given the unstable performance of LLMs, particularly closed-source ones, we run each model thrice during our experiments, selecting the median result from these repeated runs.
|
| 340 |
+
|
| 341 |
+
Table 9: The statistics of WEAKNESS.
|
| 342 |
+
|
| 343 |
+
<table><tr><td># of instances</td><td>993</td></tr><tr><td># of source papers</td><td>993</td></tr><tr><td>ave. input context length (in words)</td><td>9,811</td></tr><tr><td>max input context length (in words)</td><td>49,195</td></tr><tr><td>min input context length (in words)</td><td>24</td></tr><tr><td>ave. # of input figures</td><td>7.0</td></tr><tr><td>max # of input figures</td><td>37.0</td></tr><tr><td>min # of input figures</td><td>0.0</td></tr><tr><td>ave. # of input tables</td><td>4.3</td></tr><tr><td>max # of input tables</td><td>53.0</td></tr><tr><td>min # of input tables</td><td>0.0</td></tr><tr><td>ave. # of reviewers per paper</td><td>3.8</td></tr><tr><td>max # of reviewers per paper</td><td>9.0</td></tr><tr><td>min # of reviewers per paper</td><td>3.0</td></tr><tr><td>ave. # of weaknesses per reviewer</td><td>4.8</td></tr><tr><td>max # of weaknesses per reviewer</td><td>39.0</td></tr><tr><td>min # of weaknesses per reviewer</td><td>1.0</td></tr><tr><td>ave. length of weakness (in words)</td><td>39.1</td></tr><tr><td>max length of weakness (in words)</td><td>371.0</td></tr><tr><td>min length of weakness (in words)</td><td>2.0</td></tr></table>
|
| 344 |
+
|
| 345 |
+
# C. More Experiment Results and Details
|
| 346 |
+
|
| 347 |
+
# C.1. Input Context Scaling Investigation
|
| 348 |
+
|
| 349 |
+
Figure 4, Figure 5, and Table 10 show the context scaling results of EQINFER, EXPDESIGN, and WEAKNESS.
|
| 350 |
+
|
| 351 |
+
Table 10: The performance comparison of different input processing methods for WEAKNESS. We use GPT-4o and GPT-4-Turbo because both accept a maximum of 128k tokens input. We also put the results of AI-SCI in the table for reference. Here, "split-combine" splits the input paper into several pieces, where each piece's length is denoted as "window size"; "no-split" means the conventional input cutting, for example, if the window size is 3,000, then only the first 3,000 words in the paper are used. According to the data statistics, 20,000 words can cover maximum lengths of more than $95\%$ of the papers in our dataset.
|
| 352 |
+
|
| 353 |
+
<table><tr><td>Models</td><td>Input Context Processing</td><td>Window Size (in words)</td><td>S-F1</td><td>S-Precision</td><td>S-Recall</td><td>ITF-IDF</td></tr><tr><td rowspan="3">GPT-4o</td><td>split-combine</td><td>3,000</td><td>47.73</td><td>42.09</td><td>55.48</td><td>5.95</td></tr><tr><td>no-split</td><td>3,000</td><td>45.74</td><td>43.45</td><td>48.54</td><td>5.92</td></tr><tr><td>no-split</td><td>20,000</td><td>45.47</td><td>42.97</td><td>48.51</td><td>6.02</td></tr><tr><td rowspan="3">AI-SCI</td><td>split-combine</td><td>3,000</td><td>45.05</td><td>40.02</td><td>51.91</td><td>2.23</td></tr><tr><td>no-split</td><td>3,000</td><td>42.56</td><td>40.90</td><td>44.65</td><td>2.53</td></tr><tr><td>no-split</td><td>20,000</td><td>42.53</td><td>40.75</td><td>44.78</td><td>2.58</td></tr></table>
|
| 354 |
+
|
| 355 |
+
# C.2. Human Evaluation on LLM-Generated Novel Experiments
|
| 356 |
+
|
| 357 |
+
Figure 6 illustrates the evaluation guideline for novel experiments generated by LLMs. We ask 3 senior PhD students to evaluate each paper; that is, if the first two annotators disagree with each other, a third annotator will make a final decision. Table 11 presents several human evaluation cases.
|
| 358 |
+
|
| 359 |
+
# C.3. Human Evaluation on LLM-Generated Explanation
|
| 360 |
+
|
| 361 |
+
We ask 5 annotators to evaluate the LLM-generated explanations. Specifically, each of them is assigned 4 or 5 papers, along with the corresponding experiment lists. For each paper, the annotator is given 5 different models' outputs (model names are anonymized), and the annotator has to decide if each LLM-generated explanation is acceptable according to the experiment. We show the human evaluation results in Table 5.
|
| 362 |
+
|
| 363 |
+
# C.4. Multi-Modal Input Ablation
|
| 364 |
+
|
| 365 |
+
We post the multi-modal ablation study of EXPDESIGN and WEAKNESS in Table 12 and Table 13.
|
| 366 |
+
|
| 367 |
+
# D. Data Cases and Annotation Platform Illustration
|
| 368 |
+
|
| 369 |
+
As shown in Figure 8, 9, and 10, we show the sample cases of the three tasks in AAAR-1.0. Meanwhile, we illustrate the screenshot of our annotation platform in Figure 7.
|
| 370 |
+
|
| 371 |
+
# E. Prompt Templates
|
| 372 |
+
|
| 373 |
+
In this appendix, we attach all the prompts used in this work, including prompts in data collection and model prediction, as shown in Figure 11, 12, and 13.
|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
(a) The review score distribution of the papers used in WEAKNESS.
|
| 377 |
+
|
| 378 |
+

|
| 379 |
+
(b) The track distribution of the papers used in WEAKNESS.
|
| 380 |
+
Figure 3: The data diversity illustration of WEAKNESS, including the score distribution and track distribution of the papers used in our dataset.
|
| 381 |
+
|
| 382 |
+

|
| 383 |
+
Figure 4: The input context length scaling trend on the EQINFER task.
|
| 384 |
+
|
| 385 |
+

|
| 386 |
+
|
| 387 |
+

|
| 388 |
+
Figure 5: The input context length scaling trend of different LLMs on the EXPDESIGN task.
|
| 389 |
+
|
| 390 |
+
<table><tr><td>For each paper, you are given this paper's human-annotated experiments (Column C), along with three different models' prediction experiments (Columns D, G, J)</td></tr><tr><td>Those model-generated experiments are all novel experiments that the original human-annotated experiments (Column C) didn't mention. And your task is to evaluate whether these novel experiments are good or not.</td></tr><tr><td>Based on the original paper and its experiments, pls rate the quality of each model-generated experiment.</td></tr><tr><td>A (necessary experiment): Label an experiment with "A" if you think this experiment is necessary for this paper.</td></tr><tr><td>A "necessary" experiment means if the authors don't include this experiment in the paper, this paper will be highly likely be rejected by the reviewer.</td></tr><tr><td>For example, if this paper proposes a novel neural adaptor model, then an ablation study is required to see if having the proposed adaptor can contribute to the performance.</td></tr><tr><td>B (optional experiment): label an experiment with "B", if you think this experiment is an optional choice for this paper.</td></tr><tr><td>For example, if a paper proposes a new metric learning algorithm, conducting a representation space visualization is not required but can be useful for enhancing the explainability of this algorithm.</td></tr><tr><td>C (unrelated experiment): label an experiment with "C" if you think this experiment is unrelated to the core motivation of this paper. 
Such as those fancy experiments that we can just omit without any impact.</td></tr><tr><td>Note that, if the model-generated experiments are too general, such as simply suggesting an "ablation study" without any details, then you can also categorise it as an unrelated experiment.</td></tr><tr><td>In the "Your Assessment" column, write down your assessment of the model-generated experiments,</td></tr><tr><td>For example, if there are five novel experiments, write a list with a length of 5: [A, B, C, A, B]</td></tr><tr><td>Leave any comments if you are not confident with any of your ratings.</td></tr></table>
|
| 391 |
+
|
| 392 |
+
Figure 6: The human guideline for evaluating the LLM-generated novel experiments.
|
| 393 |
+
|
| 394 |
+
<table><tr><td>A</td><td>B</td><td>C</td><td>D</td></tr><tr><td colspan="4">Here, I provide a suggested annotation pipeline:1. Click the PDF link (Column B, Google Drive link) and read the "Experiment" section of the paper you are going to annotate. If you are not familiar with this paper, we also encourage you to read the full paper.2. For each experiment within the "Experiment" section, try to answer the following two questions:- What experiments do you suggest doing? (column C in this sheet)- Why do you suggest these experiments? (column D in this sheet)Write the "suggestion-style" answers to the above two questions by making comments on the PDF file directly --- i.e., highlighting the related paragraphs/tables/figures (this comment location information is a crucial part of your annotation, which will be used to ask you to go to see my annotation examples for a better understanding.3. After finishing all the annotations on the PDF file, copy all your annotations into this sheet.4. Organize all the experiment suggestions into the list. For example, in columns C and D, you should write something like:1. AAA ...2. BBB ...3. CCC ...Make sure all your lists are consistent! For example, if you make 7 experiment comments in the PDF, make sure there are also 7 items in columns C and D in this sheet.I ask all of you to go to see my annotation sheet and please use the same annotation format as mine (e.g., how to write the list, how to make comments on the PDF).Other notes:Usually, we only consider the experiments in the paper's main body and exclude the appendix, unless you think the experiments in the appendix are also critical to this paper ---the author explicitly claimed the importance or frequently mentioned this experiment in the paper's main body.Paper TitlePDF LinkWhat experiments do you suggest doing?Why do you suggest these experiments?1. Few-shot instruction tuning coverage speed comparison across diff 1. 
To investigate whether the current LMs can truly understand the semantics2. Zero-shot instruction-following performances among different instruc 2. To see if different instructions can impact the models' zero-shot instruc3. Few-shot instruction-following performance among different instruti 3. To see if different instructions can impact the models' few-shot instruci4. The effect of the target words. The author should also investigate whether 4. To see if the model can truly follow instructions to solve the task or just1. Cross-task instruction-following performance evaluation: the authors s 1. To prove that the task instructions in the proposed dataset (in both train2. Ablation study on the different components of the task instruction: the 2. Since the author proposed various components for the task instructions</td></tr></table>
|
| 395 |
+
|
| 396 |
+
Figure 7: The annotation platform for collecting the annotation of EXPDESIGN. We ask annotators to first make comments on the Google Drive PDF, then move all the annotations to the online Google Doc (for further verification and discussion).
|
| 397 |
+
|
| 398 |
+
<table><tr><td>Context Before</td><td>Context After</td><td>Equation</td><td>Answer</td></tr><tr><td>In this paper, we investigate what types of stereotypical information are captured by pretrained language models. We present the first dataset comprising stereotypical attributes of a range of social groups and propose a method to elicit stereotypes encoded by pretrained language models in an unsupervised fashion. Moreover, we link the emergent stereotypes to their manifestation as basic emotions as a means to study their emotional effects in a more generalized manner [...]</td><td>We then define emotion vectors $\hat{v} \in \mathcal{R}^{10}$ for each group $TGT$ [...]</td><td>$\textnormal{S}_{emo}(\texttt{TGT}) = \sum\limits_{i=1}^{|W_{TGT}|} w(i) / |W_{TGT}|$</td><td>correct</td></tr><tr><td>In this paper, we investigate what types of stereotypical information are captured by pretrained language models. We present the first dataset comprising stereotypical attributes of a range of social groups and propose a method to elicit stereotypes encoded by pretrained language models in an unsupervised fashion. Moreover, we link the emergent stereotypes to their manifestation as basic emotions as a means to study their emotional effects in a more generalized manner [...]</td><td>We then define emotion vectors $\hat{v} \in \mathcal{R}^{10}$ for each group $TGT$ [...]</td><td>$\textnormal{S}_{emo}(\texttt{TGT}) = \frac{1}{|W_{TGT}|} \sum_{w \in W_{TGT}} \text{score}(w, emo)$</td><td>incorrect</td></tr></table>
|
| 399 |
+
|
| 400 |
+
Figure 8: Two sample cases of EQINFER.
|
| 401 |
+
|
| 402 |
+
Table 11: Examples of human evaluation on the model-generated novel experiments.
|
| 403 |
+
|
| 404 |
+
<table><tr><td>Paper Title</td><td>Original Experiments (by human)</td><td>Novel Experiment (by LLMs)</td><td>Rating</td></tr><tr><td>WiCE: Real-World Entailment for Claims in Wikipedia</td><td>1. Analysis in Verification Problem Distribution: This paper should provide detailed analysis and statistics about the verification problems in the proposed dataset.2. Off-the-shelf entailment classification performance: The authors should provide entailment classification performance of existing models on the proposed dataset without fine-tuning.3. Human Performance: The authors should show human performance on the proposed dataset.4. Performance of fine-tuned models: The authors should provide the performance of models fine-tuned on the proposed dataset.5. Performance on the evidence retrieval task: The authors should show the performance on the evidence retrieval task, which is a sub-task of the proposed dataset.6. Performance of LLMs: The authors should provide the performance of LLMs on the proposed dataset.7. Retrieval+Entailment: Authors should provide experiments on a framework of retrieving evidence sentences and evaluate entailment by using the retrieved sentences.8. Analysis of Claim-Split on Downstream Tasks: The authors should analyze how claim-split, the proposed method, is effective on tasks other than the proposed dataset.</td><td>Assess model performance on WiCE without fine-tuning to test domain generalization from traditional NLI datasets.</td><td>A</td></tr><tr><td>MetaMath: Bootstrap Your Own Mathematical Questions for Large Language Models</td><td>1. Results of multiple LLMs on popular math datasets: The authors should show the performance of multiple LLMs fine-tuned on their dataset on popular math datasets.2. Performance on open-source models with different sizes: The authors should show the performance of models with different sizes trained on the proposed dataset.3. 
Comparison to SOTA closed-source models: The authors should compare the performance of open-source models trained on the proposed dataset and strong closed-source models.4. Evaluate the effect of augmentations: The authors need to perform an ablation study to compare the different augmentation methods they proposed.5. Analyze Training on Incorrect Answers: The authors should analyze whether wrong answers generated in data augmentation can harm the performance.6. Evaluate other ways to increase the size of training data: The authors should evaluate other ways to increase the training data size and compare the performance with models trained on their proposed train data.7. Error Analysis: The authors should analyze the performance of their models in different conditions (e.g., lengths of questions).</td><td>Prompt Sensitivity Analysis: Evaluate the sensitivity of MetaMath to different prompt formats or phrasings of mathematical questions.</td><td>B</td></tr><tr><td>Large Language Models Cannot Self-Correct Reasoning Yet</td><td>1. Self-Correction with Oracle Labels: The authors should evaluate self-correction performance with oracle labels.2. Intrinsic Self-Correction: The authors should show performance without using the oracle labels.3. Analysis of Mistakes in Self-Correction: The authors should analyze the properties of mistakes made in the self-correction framework.4. Multi-Agent Debate: The authors should evaluate self-correction with multi-agent debate.5. Prompt Design Analysis: The authors should analyze the influence of prompt design for the initial responses on self-correction performance.</td><td>Visualization of learned representations or attention mechanisms to provide insights into the model's inner workings.</td><td>C</td></tr></table>
|
| 405 |
+
|
| 406 |
+
Table 12: The figure inputs ablation of EXPDESIGN. For the maximum text input length, same as the setting in Table 2, we use 2,000 and 3,000 words for open- and closed-source models, respectively. For the closed-source GPT-4o and GPT-4, as they have long context window sizes, we use all the figures of each paper. While for InternVL2, we randomly select two figures per input paper.
|
| 407 |
+
|
| 408 |
+
<table><tr><td rowspan="2">Models</td><td colspan="3">Experiment Design</td><td colspan="3">Experiment Explanation</td></tr><tr><td>En-F1</td><td>En-Precision</td><td>En-Recall</td><td>S-Match</td><td>ROUGE-L</td><td>ROUGE-1</td></tr><tr><td>GPT-4o</td><td>25.03</td><td>22.25</td><td>36.59</td><td>58.54</td><td>29.25</td><td>35.50</td></tr><tr><td>w/ figures</td><td>25.39</td><td>24.35</td><td>32.80</td><td>58.53</td><td>27.87</td><td>34.30</td></tr><tr><td>InternVL2-26B</td><td>24.26</td><td>39.50</td><td>14.91</td><td>50.03</td><td>29.13</td><td>34.26</td></tr><tr><td>w/ figures</td><td>15.04</td><td>38.50</td><td>8.64</td><td>50.29</td><td>29.29</td><td>34.06</td></tr></table>
|
| 409 |
+
|
| 410 |
+
Table 13: The ablation study about the paper tables and figures of WEAKNESS. Based on the conclusion in Table 10, we use the "split-combine" to process the text input here (2,000 and 3,000 words context window size for open- and closed-source models). For GPT-4o, we use all the table/figure images; while for InternVL2, we randomly select two images per paper, i.e., two random figures, two random tables, or one random figure + table.
|
| 411 |
+
|
| 412 |
+
<table><tr><td>Models</td><td>S-F1</td><td>S-Precision</td><td>S-Recall</td><td>ITF-IDF</td></tr><tr><td>GPT-4o</td><td>47.73</td><td>42.09</td><td>55.48</td><td>5.95</td></tr><tr><td>w/ tables</td><td>46.76</td><td>41.32</td><td>54.17</td><td>5.53</td></tr><tr><td>w/ figures</td><td>46.62</td><td>41.20</td><td>54.04</td><td>5.48</td></tr><tr><td>w/ tables & figures</td><td>46.58</td><td>41.17</td><td>53.98</td><td>5.36</td></tr><tr><td>InternVL2-26B</td><td>41.91</td><td>41.02</td><td>43.28</td><td>1.48</td></tr><tr><td>w/ tables</td><td>40.55</td><td>40.37</td><td>42.91</td><td>1.46</td></tr><tr><td>w/ figures</td><td>42.88</td><td>42.10</td><td>43.76</td><td>1.46</td></tr><tr><td>w/ tables & figures</td><td>42.44</td><td>42.00</td><td>43.31</td><td>1.44</td></tr></table>
|
| 413 |
+
|
| 414 |
+
<table><tr><td>Pre-Experiment Context (Input)</td><td>Experiment Design (Output)</td><td>Motivation Explanation (Output)</td></tr><tr><td>In this paper, we show that Multilingual BERT (\\mbert{}), released by \cite{devlin2018bert} as a single language model pre-trained from monolingual corpora in 104 languages, is surprisingly good at zero-shot cross-lingual model transfer, in which task-specific annotations in one language are used to fine-tune the model for evaluation in another language [...]</td><td>1. Expand the definition of overlap. The authors should calculate overlap based on all the words shared between two languages, instead of just shared vocabulary on just the entities.
|
| 415 |
+
2. Report performance gains for using some popular language similarity criterion, e.g., WALS.
|
| 416 |
+
3. Effect of tokens per word. The authors should perform experiments on more scripts, specifically looking at the effect of words being split into multiple tokens.
|
| 417 |
+
4. Control for vocabulary overlap among languages. Choose languages that have large vocabulary overlap and different word order feature. Train on one set of languages and then perform zero shot evaluation on the rest.
|
| 418 |
+
5. Ablate the effect of common word pieces by using a non-overlapping tokenizer for different languages.</td><td>1. To check whether non-entity overlap between two languages also contribute to better performance on recognizing the entities. The model may use information from non-entity words to recognize an entity.
|
| 419 |
+
Additionally, successfully recognizing that a word is not an entity also contributes the performance on the NER task.
|
| 420 |
+
2. To understand which features the language model can exploit for cross-lingual transfer. This will give us insights into what typological similarity the multilingual language model can pick up during pretraining.
|
| 421 |
+
3. To understand the effect of POS label frequency. The idea is that two languages with similar token to word ratio will result in better cross-lingual transfer. The reason is that continuation tokens should be classified properly and the change in the training corpus of the frequency of continuation tokens will result in different performance.
|
| 422 |
+
4. To properly control for the effect of vocabulary overlap. Since large overlap in vocabulary can lead to performance gain, the reported results does not reflect the true impact of word order.
|
| 423 |
+
5. To understand the effect of structure of sentences in different languages for cross-lingual understanding of multilingual language models. Since there will be no overlap between different languages the model must learn cross-lingual representations based on syntactic and semantic properties of the languages.</td></tr></table>
|
| 424 |
+
|
| 425 |
+
Figure 9: A sample case of EXPDESIGN.
|
| 426 |
+
|
| 427 |
+
<table><tr><td>Paper Context (Input)</td><td>Weaknesses (Output)</td></tr><tr><td>A Neural Process (NP) (Garnelo et al., 2018a;b) meta-learns a stochastic process describing the relationship between inputs and outputs in a given data stream, where each task in the data stream consists of a meta-training set of input-output pairs and also a meta-validation set. The NP then defines an implicit stochastic process whose functional form is determined by a neural network taking the meta-training set as an input [...]</td><td>Reviewer#1:
|
| 428 |
+
1. The writing is not on par with the idea.
|
| 429 |
+
Reviewer#2:
|
| 430 |
+
1. It would be informative to see how MPNPs scale with higher dimensionality. For example, empirical comparisons on a high-D regression task complementing the 10 one.
|
| 431 |
+
2. The results of the Lotka-Volterra task would deserve further analysis: Why is BNP/BANP seemingly more apt at dealing with misspecification than MPNPs? My understanding is that model data-mismatch is a problem general to Bayesian inference, i.e., should also affect B(A)NP.
|
| 432 |
+
Reviewer#3:
|
| 433 |
+
1. The consistent outperformance of BNP/BANP over MPNP/MPANP weakens the central hypothesis of the paper.
|
| 434 |
+
2. The comparisons appear to be against relatively old versions of NPs. I wonder how the proposed method compares against more recent versions of NPs than ANPs (2018) and BNPs (2020), for instance Evidential Turing Processes (2022).
|
| 435 |
+
3. I find that the adaptation of the MPNP idea to CANP a bit dilutes the main message of the paper. It is after all a heavy pipeline with many components.
|
| 436 |
+
4. It is great that the paper points out the limitations of the presented method, but would be even better if it also gave an educated guess on which properties of the method cause them.</td></tr></table>
|
| 437 |
+
|
| 438 |
+
Figure 10: A sample case of WEAKNESS.
|
| 439 |
+
|
| 440 |
+
<table><tr><td>LLM-based Equation Synthesis</td><td>LLM-based Equation Filtering</td><td>Model Prediction</td></tr><tr><td>##### Task:You are asked to complete the equation in an NLP paper. Given the context before and after an equation, where the equation is deleted, you should help me recover that equation.</td><td rowspan="4">##### Task:You are given a source code of a latex equation. Based on your knowledge regarding the Machine Learning and NLP, you should help me identify if this equation has obvious flaw.</td><td rowspan="4">##### Task:You are given the latex source code of the context before and after an equation in an NLP paper, while this equation is masked. Your task is to identify the correctness of the given candidate equation.Only provide either 'Correct' or 'Wrong'. Avoid any explanations.</td></tr><tr><td>##### Requirements:1. Give me the latex source code of the missed the equation.2. Only give me the equation, avoid any other explanations.</td></tr><tr><td>##### Context Before:{The context before the equation}</td></tr><tr><td>##### Context After:{The context after the equation}.</td></tr><tr><td>##### Equation:{Left part of the ground truth equation}</td><td>##### Your Answer:</td><td>##### Equation:{equation}</td></tr><tr><td></td><td></td><td>##### Your Answer:</td></tr></table>
|
| 441 |
+
|
| 442 |
+
Figure 11: The prompts used in EQINFER, including both data collection and model prediction.
|
| 443 |
+
|
| 444 |
+
<table><tr><td>LLM-based Leaking Sentence Deletion</td><td>Model Prediction (Experiment Design)</td><td>Model Prediction (Motivation Explanation)</td></tr><tr><td>You are given a sentence (or a short paragraph) from an ML paper, along with a list of the experiments from this paper; help me decide whether this sentence discusses any experiments in the list. Let's say, if one sentence includes clues for coming up with any experiments in the list, we call this sentence a 'leaking sentence'; otherwise, if any experiment ideas cannot be inferred from the sentence, we call it a 'non-leak sentence'. Please give me a '1' if this sentence is a 'leaking sentence'; otherwise, give me a '0'. ### Experiment List: {The experiment list}. ### Sentence: {The sentence}. Now, give me your decision (give me either '0' or '1', only the number, without any explanations):</td><td>You are partially given an ML paper (in latex), including some useful sections (e.g., 'abstract' and 'introduction') having some basic introductions to the research of this paper, where all the 'experiment' related sections are deleted. Please first help me carefully read these sections and try to understand the motivations of this research, such as 'what the authors are trying to propose/demonstrate?' and 'what are the main contributions/differences of this paper from others?' Then, based on your in-depth understanding of this paper, imagine that you are the authors of this paper; what experiments do you have to conduct to prove your research? Namely, you have to "recover the deleted experiments** by providing me with **a list of experiment ideas**, where the list briefly summarizes the experiments the authors should conduct. Here is an example: {few-shot examples} Here is the target ML paper (partial content): {The context input}. Now, based on this paper, give me a list of experiments the author has to do. Please only give me the list, without any other words. 
### Your Experiment List:</td><td>You are partially given an NLP paper (in latex), including some useful sections (e.g., 'abstract' and 'introduction') having some basic introductions to this research, where all the 'experiment' related sections are deleted. Meanwhile, you are also given a list of experiments that try to predict the missed experiments in this paper. Now, imagine the experiment list you created; you have to explain **why you suggested these experiments**. Here is an example experiment list: {few-shot examples} Here is the example corresponding explanation list: {few-shot examples} Now, help me look at the following paper: ### Paper: {The context input}. ### Experiment List: {The experiment list}. Please give me your explanation list, which should be the same length as the 'Experiment List'; the items of the two lists correspond one-to-one. Only give me the list without any other useless words. ### Explanation List:</td></tr></table>
|
| 445 |
+
|
| 446 |
+
Figure 12: The prompts used in EXPDESIGN, including both data collection and model prediction.
|
| 447 |
+
|
| 448 |
+
<table><tr><td>Model Prediction (Weaknesses)</td></tr><tr><td>You are given an NLP paper, along with its figure illustrations. Imagine you are a machine learning expert with rich research experience. Please carefully review this paper and identify the weaknesses of this research.</td></tr><tr><td>Here is the paper (it might be in partial content):</td></tr><tr><td>The context input.</td></tr><tr><td>Now, based on the provided context, give me a list of weaknesses of this research paper (such as '1. XXX\n2. XXX', one point per line).Note that if the given context is irrelevant to research, such as it is talking about 'acknowledgement', just generate 'No research content'.Please either give me the weakness list of this research paper or generate 'No research content' to clarify this is not a research paper, without any other words.</td></tr><tr><td>Your Answer:</td></tr></table>
|
| 449 |
+
|
| 450 |
+
Figure 13: The prompts used in WEAKNESS.
|
aaar10assessingaispotentialtoassistresearch/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4d68d8ce0541bfd100bc43dbcbdf45dc2cf4f1e0c3a28c36241a3dabf7a5db0b
|
| 3 |
+
size 2114005
|
aaar10assessingaispotentialtoassistresearch/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5109a201ce296c857e2808c91f984400b10c298fdeaedfa00e98829b63dd5cad
|
| 3 |
+
size 539383
|
abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/be605c98-e986-4916-a1cf-5a2cf4d89930_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:672207b0ab3f9251544a74055040f1b97e714aee6def49ff0ecc720fdd50b794
|
| 3 |
+
size 123947
|
abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/be605c98-e986-4916-a1cf-5a2cf4d89930_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:13dc41e252e1be2190432c77945e1f50ef08ab05323986fadbe6c50ddfd31be7
|
| 3 |
+
size 147979
|
abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/be605c98-e986-4916-a1cf-5a2cf4d89930_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:359664e9e82636cddac56e31ee8dc1a4687b0ec5c42f8b36311a469125670446
|
| 3 |
+
size 591272
|
abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/full.md
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Ab Initio Nonparametric Variable Selection for Scalable Symbolic Regression with Large $p$
|
| 2 |
+
|
| 3 |
+
Shengbin Ye<sup>1</sup> <sup>2</sup> Meng Li<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Symbolic regression (SR) is a powerful technique for discovering symbolic expressions that characterize nonlinear relationships in data, gaining increasing attention for its interpretability, compactness, and robustness. However, existing SR methods do not scale to datasets with a large number of input variables (referred to as extreme-scale SR), which is common in modern scientific applications. This "large $p$ " setting, often accompanied by measurement error, leads to slow performance of SR methods and overly complex expressions that are difficult to interpret. To address this scalability challenge, we propose a method called PAN+SR, which combines a key idea of ab initio nonparametric variable selection with SR to efficiently pre-screen large input spaces and reduce search complexity while maintaining accuracy. The use of nonparametric methods eliminates model misspecification, supporting a strategy called parametric-assisted nonparametric (PAN). We also extend SRBench, an open-source benchmarking platform, by incorporating high-dimensional regression problems with various signal-to-noise ratios. Our results demonstrate that PAN+SR consistently enhances the performance of 19 contemporary SR methods, enabling several to achieve state-of-the-art performance on these challenging datasets.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Symbolic regression (SR) is a mathematical technique for finding a symbolic expression that matches data from an unknown function. An early example of SR dates back to
|
| 12 |
+
|
| 13 |
+
$^{1}$ Department of Statistics, Rice University, Houston, TX, USA $^{2}$ Department of Statistics and Data Science, Northwestern University, Evanston, IL, USA. Correspondence to: Meng Li <meng@rice.edu>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
|
| 16 |
+
|
| 17 |
+
the 1600s when Johannes Kepler used astronomical data to discover that Mars' orbit was elliptical. This discovery, along with Kepler's other parsimonious and analytically tractable laws of planetary motion, helped launch a scientific revolution.
|
| 18 |
+
|
| 19 |
+
With the recent progress in theoretical modeling and experimental instrumentation, researchers have entered a new era of big data. The development of SR models is particularly important, as they have emerged as a powerful tool for developing machine learning models that are intelligible, interpretable, and compact. Unlike large numerical models, the mathematical expressions used in SR models enable an easy understanding of their behavior, making them valuable in fields such as physics, where they can connect newly discovered physical laws with theory to facilitate subsequent theoretical developments (Wu & Tegmark, 2019). Moreover, SR models offer a safe and responsible option for machine learning applications with high societal stakes, such as those related to human lives, as they are well-suited for human interpretability and in-depth analysis. As such, SR models have found successful applications across a range of fields, including astrophysics (Lemos et al., 2023), chemistry and materials science (Hernandez et al., 2019; Liu et al., 2020; 2022), control (Derner et al., 2020), economics (Verstyuk & Douglas, 2022), mechanical engineering (Kronberger et al., 2018), medicine (Virgolin et al., 2020), and space exploration (Märtens & Izzo, 2022), among others (Matsubara et al., 2024).
|
| 20 |
+
|
| 21 |
+
SR literature has traditionally focused on datasets with low-dimensional inputs, often with $p \leq 10$ , and primarily considered only relevant variables—those used in the ground truth (La Cava et al., 2021; Kamienny et al., 2022; Shojaee et al., 2023; Tenachi et al., 2023; Li et al., 2024). In these settings, variable selection has not been critical, as SR has largely been viewed as an optimization problem under low-noise conditions. However, modern scientific applications increasingly involve datasets with far larger numbers of variables ( $p = 102$ to 459 in this work), often including irrelevant variables, rendering variable selection a critical yet underexplored concept in SR pipelines.
|
| 22 |
+
|
| 23 |
+
While variable selection is a well-established topic in statistics, its adoption in SR has been limited and its effective
|
| 24 |
+
|
| 25 |
+
ness in SR remains unclear. Existing approaches, such as random forest (RF)-based pre-selection in PySR (Cranmer, 2023), have demonstrated limited utility. Indeed, the PySR documentation explicitly notes that options like select_k_features are rarely used, suggesting that current methods are not well-suited to SR tasks. This observation is further supported by our analysis in Appendix D.2, where RF is shown to perform unsatisfactorily. The limited performance of off-the-shelf methods like RF highlights the unique challenges of variable selection in the context of SR. Unlike typical variable selection tasks, SR variable selection demands a near-zero false negative rate (FNR), as excluding even a single relevant variable from the search space prevents the recovery of the true underlying function. While false positives (FPs) primarily increase computational burden, they do not fundamentally impede the discovery of the underlying model. This asymmetry in performance requirements explains why standard methods often fall short and underscores the importance of designing variable selection methods specifically tailored to SR.
|
| 26 |
+
|
| 27 |
+
In this paper, we introduce a versatile framework, PAN+SR, for improving SR methods at extreme scales. PAN+SR leverages the Parametric Assisted by Nonparametrics (PAN) strategy (Ye et al., 2024) for an ab initio screening of large influx of input variables before expression synthesis, enabling SR tasks at extreme scales. In light of the unique challenge of SR pre-screening, we propose a novel non-parametric variable selection method designed to minimize FN; we refer to this method as PAN throughout this paper. Furthermore, to evaluate PAN+SR at extreme scales, we extend the open-source SR benchmarking database, SRBench (La Cava et al., 2021), with high-dimensional problems containing white noise at various signal-to-noise ratios. In Section 6, we showcase the performance uplift of 19 contemporary SR methods under PAN+SR. The PAN+SR framework is available as an open-source project at https://github.com/mattsheng/PAN_SR.
|
| 28 |
+
|
| 29 |
+
# 2. Background and Motivation
|
| 30 |
+
|
| 31 |
+
Given a dataset $(\pmb{y},\pmb{X})$ with target $\pmb{y} \in \mathbb{R}^n$ and features $\pmb{X} = (\pmb{x}_1,\dots,\pmb{x}_p) \in \mathbb{R}^{n\times p}$ , SR assumes the existence of an analytical data-generating function that links $\pmb{X}$ to $\pmb{y}$ :
|
| 32 |
+
|
| 33 |
+
$$
|
| 34 |
+
y_i = f_0\left(x_{i1}, \dots, x_{ip}\right) + \varepsilon_i, \quad \text{for} \quad i = 1, \dots, n, \tag{1}
|
| 35 |
+
$$
|
| 36 |
+
|
| 37 |
+
in the presence of observation noise $\varepsilon_{i}$ . The goal of SR is to recover the unknown regression function $f_{0}(\cdot)$ symbolically. For example, consider regressing the gravitational force between two objects, $F$ , on their masses $(m_{1}, m_{2})$ and the distance between their centers $(r)$ . An SR algorithm would ideally re-discover the Newton's Law of Universal Gravitation, $F = 6.6743 \times 10^{-11} \cdot m_{1}m_{2} / r^{2}$ . This is typically done by randomly constructing mathematical expressions using the
|
| 38 |
+
|
| 39 |
+
features, $\mathbf{X} = (m_{1}, m_{2}, r)$ in this case, and a set of mathematical operations, e.g., $\mathcal{O} = \{+, -, \times, \div, \exp, \log, \cdot^{2}\}$ . Even for this low-dimensional problem, it has been shown that exploring all expressions $\mathcal{F}(\mathbf{X}, \mathcal{O})$ , induced by $\mathbf{X}$ and $\mathcal{O}$ , is NP-hard (Virgolin & Pissis, 2022). Hence, typical SR algorithms only traverse through a small subset of the full search space, such as limiting the complexity of the candidate SR models, total runtime, number of mathematical operations, etc.
|
| 40 |
+
|
| 41 |
+
In realistic scientific applications, particularly in the era of big data, scientists often include as many intuitively reasonable features as possible, many of which may be irrelevant to the target $\mathbf{y}$. This practice causes the search space $\mathcal{F}(X, \mathcal{O})$ to expand double-exponentially quickly (Ye et al., 2024), making it extremely challenging—if not impossible—to recover $f_0(\cdot)$ using algorithmic approaches alone. To this end, we propose the PAN+SR framework, which integrates the non-parametric module of PAN as a model-based pre-screening step. This framework excludes irrelevant features prior to applying SR methods, thereby mitigating the explosion of the search space in high-dimensional problems. Here, we assume that a high-dimensional SR problem in (1) can be reduced to
|
| 42 |
+
|
| 43 |
+
$$
|
| 44 |
+
y_i = f_0\left(\boldsymbol{X}_{i, S_0}\right) + \varepsilon_i, \quad \text{for} \quad i = 1, \dots, n, \tag{2}
|
| 45 |
+
$$
|
| 46 |
+
|
| 47 |
+
where only a small subset $S_0$ of $p_0 = |\mathcal{S}_0| \ll p$ features exert influence on $\pmb{y}$. Then the oracle search space $\mathcal{F}(X_{\mathcal{S}_0},\mathcal{O})$ is a significantly smaller subspace of the full search space $\mathcal{F}(\boldsymbol {X},\mathcal{O})$. Thus, the successful identification of $S_0$, or at least a superset of $S_0$, is critical for reducing high-dimensional SR problems into manageable low-dimensional ones. With this reduction, the dataset $(\pmb {y},\pmb{X}_{\mathcal{S}_0})$ becomes sufficient for discovering $f_{0}(\cdot)$, enabling SR methods to handle high-dimensional problems without requiring any modifications to their algorithms.
|
| 48 |
+
|
| 49 |
+
# 3. Related Work
|
| 50 |
+
|
| 51 |
+
SRBench (La Cava et al., 2021) is a reproducible and open-source benchmarking platform for SR that has made significant strides in the field through its curation of 122 real-world datasets and 130 ground-truth problems and its comprehensive evaluations of 14 contemporary SR methods. SRBench has quickly gained adaptations with numerous studies leveraging it to evaluate accuracy, exact solution rate, and solution complexity (Kamienny et al., 2022; Landajuela et al., 2022; Kamienny et al., 2023; Keren et al., 2023; Shojaee et al., 2023; Makke & Chawla, 2024). Despite its widespread use, SRBench primarily focuses on low-dimensional problems, which limits its applicability in the context of high-dimensional problems, a hallmark of the era of big data. In particular, the 130 ground-truth problems from the Feynman Symbolic Regression Database (Udrescu & Tegmark, 2020)
|
| 52 |
+
|
| 53 |
+
and the ODE-Strogatz repository (Strogatz, 2015) contain only the oracle features $X_{S_0}$ with at most $p = 9$ features. This low and narrow dimensional scope leaves SRBench less suited for analyzing SR at extreme scales, underscoring the need for a high-dimensional SR database.
|
| 54 |
+
|
| 55 |
+
# 4. Method
|
| 56 |
+
|
| 57 |
+
Inspired by PAN, the PAN+SR framework utilizes a one-step nonparametric variable selection strategy to pre-screen a high-dimensional dataset $(\pmb{y},\pmb{X})$ and parse the reduced dataset $(\pmb{y},\pmb{X}_{\widehat{\mathcal{S}}})$ to SR methods for subsequent expression synthesis and selection. Unlike traditional variable selection literature, where the primary focus is controlling the false discovery rates, the PAN criterion calls for minimizing the false negative rate (FNR) while controlling the false positive rate (FPR) is secondary. In other words, the selected set of features $\widehat{\mathcal{S}}$ should be a superset of $S_0$ and as small as possible. When $\widehat{\mathcal{S}}$ fails to be the superset of $S_0$ (i.e., there is at least one FN), the reduced search space $\mathcal{F}(\pmb{X}_{\widehat{\mathcal{S}}},\mathcal{O})$ no longer contains $f_0(\cdot)$ , rendering any subsequent discovery based on $\pmb{X}_{\widehat{\mathcal{S}}}$ to be false.
|
| 58 |
+
|
| 59 |
+
Nonparametric or model-free variable selection has been extensively studied in the literature. Lafferty and Wasserman (2008) propose the RODEO method for nonparametric variable selection through regularization of the derivative expectation operator. Candès et al. (2018) propose a model-free knockoff procedure controlling FDR with no assumptions on the conditional distribution of the response. Fan et al. (2011) propose a sure independence screening method for B-spline additive model. In the Bayesian literature, Bleich et al. (2014) design permutation tests for variable inclusion proportion of Bayesian Additive Regression Tree (BART); Liu et al. (2021) deploy spike-and-slab priors directly on the nodes of Bayesian forests.
|
| 60 |
+
|
| 61 |
+
Despite this diverse array of methods, few meet the unique proposition of the PAN criterion. Among the few recent methods investigated in Ye et al. (2024), they found BART-G.SE (Bleich et al., 2014), a BART-based permutation variable selection method, to be particularly suitable for PAN. However, our comprehensive simulation study in Appendix D.2 reveals that BART-G.SE, along with three other methods, exhibit insufficient TPR, particularly under noisy or low-sample-size conditions. This deficiency renders these methods unsuitable for the PAN+SR framework.
|
| 62 |
+
|
| 63 |
+
In this paper, we introduce a novel BART-based variable selection method and demonstrate its PAN criterion consistency through an extensive simulation study in Section 6.2. The key idea behind BART is to model the regression function
|
| 64 |
+
|
| 65 |
+
$f_0(\cdot)$ by a sum of regression trees,
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
\boldsymbol{y} = \sum_{i=1}^{M} \mathcal{T}_i\left(\boldsymbol{x}_1, \dots, \boldsymbol{x}_p\right) + \varepsilon, \quad \varepsilon \sim \mathcal{N}_n\left(\boldsymbol{0}, \sigma^2 \boldsymbol{I}_n\right), \tag{3}
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
where each regression tree $\mathcal{T}_i(x_1,\ldots ,x_p)$ partitions the feature space based on the values of $x_{1},\ldots ,x_{p}$ . For each posterior sample, we calculate the proportion of splits in the ensemble (3) that use $x_{j}$ as the splitting variable, for $j = 1,\dots ,p$ . The variable inclusion proportion (VIP) $q_{j}$ of $x_{j}$ is then estimated as the posterior mean of these proportions across all posterior samples (Chipman et al., 2010). Intuitively, $q_{1},\ldots ,q_{p}$ encode the relative importance of each feature, where a large VIP $q_{j}$ suggests $x_{j}$ being an important driver of the response $\pmb{y}$ . However, deciding on how large a VIP value must be to indicate relevance remains a challenge. For instance, BART-G.SE addresses this by using a permutation test on $q_{1},\ldots ,q_{p}$ to identify significant features, whereby controlling the family-wise error rate.
|
| 72 |
+
|
| 73 |
+
Here, we propose an alternative approach that utilizes the rankings of VIPs instead of their raw values. Specifically, let $r_j$ denote the ranking of the VIP $q_j$ . Relevant features $X_{S_0}$ are expected to occupy top-ranking positions, namely $\{1, \ldots, p_0\}$ , due to their strong associations with $y$ . In contrast, irrelevant features $X_{S_1}$ , $S_1 = [p] \setminus S_0$ , are expected to appear in lower-ranking positions, namely $\{p_0 + 1, \ldots, p\}$ , since they are only selected sporadically or by chance (Chipman et al., 2010; Bleich et al., 2014). Consequently, a natural decision rule is to select feature $x_j$ if $r_j$ falls within $\{1, \ldots, p_0\}$ .
|
| 74 |
+
|
| 75 |
+
However, this decision rule is impractical in real-world applications since the sparsity $p_0$ is unknown. To address this limitation, we propose a method that leverages multiple independent runs of BART to estimate the feature rankings more robustly. Let $r_{j,k}$ denote the VIP ranking of $\boldsymbol{x}_j$ in the $k$ th run. Assume that the rankings of $\boldsymbol{x}_j$ are randomly distributed over the $K$ independent runs (see Appendix D.1 for empirical justification):
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
r_{j,1}, \ldots, r_{j,K} \stackrel{\text{iid}}{\sim} \left\{ \begin{array}{ll} \operatorname{Unif}(\{1, \ldots, p_0\}), & \text{if } j \in \mathcal{S}_0 \\ \operatorname{Unif}(\{p_0 + 1, \ldots, p\}), & \text{if } j \notin \mathcal{S}_0 \end{array} \right.
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
Then the average ranking $\bar{r}_j = \sum_{k=1}^{K} r_{j,k} / K$ of $\pmb{x}_j$ across $K$ independent runs forms two distinct clusters, $\mathcal{C}_0$ for $X_{S_0}$ and $\mathcal{C}_1$ for $X_{S_1}$. Specifically, $\bar{r}_j$ for $X_{S_0}$ are expected to cluster in $\mathcal{C}_0$ with mean $(1 + p_0) / 2$, while those for $X_{S_1}$ tend to cluster in $\mathcal{C}_1$ with mean $(p_0 + 1 + p) / 2$. Although both cluster means are unknown due to the unknown sparsity $p_0$, their separation can be identified using clustering techniques.
|
| 82 |
+
|
| 83 |
+
To illustrate, consider the extended Feynman I-38-12 dataset (defined in Section 5.2) with $p = 204$ features, of which
|
| 84 |
+
|
| 85 |
+
$p_0 = 4$ are relevant. Without loss of generality, we assume that the relevant features $X_{S_0}$ are $\boldsymbol{x}_1,\boldsymbol{x}_2,\boldsymbol{x}_3,\boldsymbol{x}_4$ , i.e., $S_0 = \{1,2,3,4\}$ and $S_{1} = \{5,\dots ,204\}$ . When $K = 20$ independent BART models are trained on the dataset, the rankings $r_{1,k},r_{2,k},r_{3,k},r_{4,k}$ frequently fall within $\{1,2,3,4\}$ across all $k = 1,\ldots ,20$ runs. This is because the relevant features are frequently selected for tree splits due to their strong associations with the response variable $\mathbf{y}$ , leading to high VIPs and consistently top rankings. In contrast, irrelevant features $\boldsymbol{x}_5,\dots ,\boldsymbol{x}_{204}$ are included sporadically in BART, with $r_{5,k},\dots ,r_{204,k}$ distributed randomly across $\{5,\dots ,204\}$ . As evident in Figure 5 in Appendix D.1, the average VIP rankings $\bar{r_j}$ of the relevant features form a low-mean cluster $\mathcal{C}_0$ with a cluster mean of $(1 + p_0) / 2 = 2.5$ , while those of the irrelevant features form a high-mean cluster $\mathcal{C}_1$ , concentrating around $(p_0 + 1 + p) / 2 = 104.5$ .
|
| 86 |
+
|
| 87 |
+
However, the sparse regression setting naturally leads to a class imbalance problem as $|\mathcal{C}_0| = p_0$ is much smaller than $|\mathcal{C}_1| = p - p_0$. To this end, we propose to apply agglomerative hierarchical clustering (AHC) with Euclidean distance and average linkage to $(\bar{r}_1, \dots, \bar{r}_p)$ and cut the dendrogram to form two clusters: $\widehat{\mathcal{C}}_0$ and $\widehat{\mathcal{C}}_1$. Then, features in $\widehat{\mathcal{C}}_0$ are retained, while those in $\widehat{\mathcal{C}}_1$ are discarded. Notably, the proposed data-driven selection criterion does not require any knowledge about the sparsity level $p_0$ or a tunable selection threshold. An ablation study evaluating the effect of different clustering algorithms on selection accuracy is available in Appendix D.3. We herein refer to this variable selection method for SR pre-screening as PAN; see Appendix C.2 for implementation details.
|
| 88 |
+
|
| 89 |
+
# 5. Experiment Design
|
| 90 |
+
|
| 91 |
+
Using an open-source benchmarking platform, SRBench, we evaluate the PAN+SR framework on two separate tasks. First, we assess its ability to make accurate predictions on "black-box" regression problems in which the underlying regression function remains unknown. Second, we test PAN+SR's ability to find the correct data-generating function $f_{0}$ on synthetic datasets with known data-generating functions originating from Feynman Lectures on Physics (Feynman et al., 2010; Udrescu & Tegmark, 2020).
|
| 92 |
+
|
| 93 |
+
The experiment settings are summarized in Table 1. All experiments were run on a heterogeneous cluster. Each algorithm was trained on each dataset in 10 repeated trials with a different random state to control both the train/test split and the seed of the algorithm. Each run was performed until a 24-hour time limit was reached or up to 500,000 expression evaluations for black-box problems or 1,000,000 for ground-truth problems. For ground-truth problems, we chose a few representative algorithms in the black-box problems and investigated additional settings of sample size and
|
| 94 |
+
|
| 95 |
+
signal-to-noise ratio. Datasets were split $75\% / 25\%$ in training and testing. For black-box problems, hyperparameters were either set to the optimal values published by SRBench or to values recommended by the original authors of the respective methods. The best hyperparameter settings in black-box regression problems were used in ground-truth problems. Instructions for reproducing the experiment is available in Appendix A, and detailed experimental settings are described in Appendix C.
|
| 96 |
+
|
| 97 |
+
# 5.1. Symbolic Regression Methods
|
| 98 |
+
|
| 99 |
+
Here we summarize the SR methods evaluated in this paper. A long strand of SR methods is based on genetic programming (GP), a technique for evolving executable data structures, such as expression trees. The most vanilla version we test is gplearn (Stephens, 2020), which performs random expression proposal and iterates through the steps of tournament selection, mutation, and crossover. Advanced GP-based methods utilize different evolutionary strategies and optimization objectives, ranging from Pareto optimization for efficient trade-offs between accuracy and model complexity to program semantics optimization for increasing coherence in expression. Here we test an array of advanced GP-based SR algorithms, including Age-Fitness Pareto optimization (AFP) (Schmidt & Lipson, 2010), AFP with co-evolved fitness estimate (AFP_FE) (Schmidt & Lipson, 2010), Epigenetic Hill Climber (EHC) (La Cava et al., 2014), $\varepsilon$-lexicase selection (EPLEX) (La Cava et al., 2019a), Feature Engineering Automation Tool (FEAT) (La Cava et al., 2019b), Fast Function Extraction (FFX) (McConaghy, 2011), GP version of Gene-pool Optimal Mixing Evolutionary Algorithm (GP-GOMEA) (Virgolin et al., 2021), Interaction-Transformation Evolutionary Algorithm (ITEA) (de Franca & Aldeia, 2021), Multiple Regression Genetic Programming (MRGP) (Arnaldo et al., 2014), Operon (Burlacu et al., 2020), PySR (Cranmer, 2023), and Semantic Back-propagation Genetic Programming (SBP-GP) (Virgolin et al., 2019).
|
| 100 |
+
|
| 101 |
+
Additional methods include Bayesian Symbolic Regression (BSR) (Jin et al., 2020), which places a prior on the expression tree; Deep Symbolic Regression (DSR) (Petersen et al., 2021), Unified Deep Symbolic Regression (uDSR) (Landajuela et al., 2022), and Dynamic Symbolic Network (DySymNet) (Li et al., 2024) utilize recurrent neural networks to propose symbolic expressions; Transformer-based Planning for Symbolic Regression (TPSR) (Shojaee et al., 2023) leverages pretrained transformer models; AIFeynman 2.0 (Udrescu et al., 2020), which uses a divide-and-conquer technique to recursively decompose complex problems into lower-dimensional sub-problems.
|
| 102 |
+
|
| 103 |
+
Table 1: Settings used in the experiments.
|
| 104 |
+
|
| 105 |
+
<table><tr><td>SETTING</td><td>BLACK-BOX PROBLEMS</td><td>GROUND-TRUTH PROBLEMS</td></tr><tr><td># OF DATASETS</td><td>35</td><td>100</td></tr><tr><td># OF ALGORITHMS</td><td>19</td><td>19</td></tr><tr><td># OF TRIALS PER DATASET</td><td>10</td><td>10</td></tr><tr><td>TRAIN/TEST SPLIT</td><td>.75/.25</td><td>.75/.25</td></tr><tr><td>TERMINATION CRITERIA</td><td>500K EVALUATIONS OR 24 HOURS</td><td>1M EVALUATIONS OR 24 HOURS</td></tr><tr><td>SAMPLE SIZE</td><td>ALL</td><td>500, 1000, 1500, 2000</td></tr><tr><td>SIGNAL-TO-NOISE RATIO</td><td>NONE</td><td>0.5, 1, 2, 5, 10, 15, 20, NONE</td></tr><tr><td>TOTAL COMPARISONS</td><td>12250</td><td>142000</td></tr><tr><td>COMPUTATION COST</td><td>34K CORE HOURS</td><td>104K CORE HOURS</td></tr><tr><td>MEMORY ALLOCATION</td><td>16 GB</td><td>16 GB</td></tr></table>
|
| 106 |
+
|
| 107 |
+
# 5.2. Datasets
|
| 108 |
+
|
| 109 |
+
We curated a database of high-dimensional regression problems for testing the capability of PAN+SR. We selected 35 black-box regression problems available in PMLB v1.0 (Romano et al., 2021) using the following criteria: ($n < 200$ and $p \geq 10$) or ($n \geq 200$ and $p \geq 20$). These problems were used in SRBench and overlap with various open-source repositories, including OpenML (Vanschoren et al., 2014) and the UCI Machine Learning Repository (Kelly et al., 2013).
|
| 110 |
+
|
| 111 |
+
We also curated 100 high-dimensional ground-truth regression problems by modifying the Feynman Symbolic Regression Database (Udrescu & Tegmark, 2020) to include irrelevant features and white noise. For each equation $f_{0}(\cdot)$ in the Feynman Lectures on Physics, we generated the relevant features $X_{S_0}$ following Udrescu and Tegmark (2020):
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\left(x_{1,j}, \dots, x_{n,j}\right) \stackrel{\text{iid}}{\sim} \operatorname{Unif}\left(a_j, b_j\right), \quad \text{for } 1 \leq j \leq p_0, \tag{4}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
where $p_0 = |\mathcal{S}_0|$ is the number of relevant features, $n$ is the sample size, and $a_j$ and $b_j$ are the lower and upper bounds for feature $x_j$ described in Udrescu and Tegmark (2020). To study the effect of noise on PAN+SR, we tuned the signal-to-noise ratio (SNR) by adding a Gaussian error term when generating the response variable:
|
| 118 |
+
|
| 119 |
+
$$
|
| 120 |
+
y_i = f_0\left(x_{i,1}, \dots, x_{i,p_0}\right) + \varepsilon_i, \quad \text{for } 1 \leq i \leq n, \tag{5}
|
| 121 |
+
$$
|
| 122 |
+
|
| 123 |
+
where $\varepsilon_{i} \stackrel{\mathrm{iid}}{\sim} N(0, \sigma_{\varepsilon}^{2})$ and $\sigma_{\varepsilon}^{2} = \sigma_{f}^{2} / \mathrm{SNR}$. When $\sigma_{\varepsilon}^{2} = 0$ or $\mathrm{SNR} = \infty$, (4) and (5) generate the original Feynman Symbolic Regression Database.
|
| 124 |
+
|
| 125 |
+
In addition to the relevant features $X_{\mathcal{S}_0} = (x_1, \dots, x_{p_0})$, we included an array of irrelevant features $X_{\mathrm{irr}}$, representing the era of big data where all reasonable features are included in the dataset. Specifically, for each relevant feature $\boldsymbol{x}_j$, $j \in \mathcal{S}_0$, we generate $(\boldsymbol{x}_{j,\mathrm{irr}}^{1}, \ldots, \boldsymbol{x}_{j,\mathrm{irr}}^{s}) \stackrel{\mathrm{iid}}{\sim} \mathrm{Unif}(a_j, b_j)$, representing $s$ copies of independent and irrelevant features coming from the same distribution as $\boldsymbol{x}_j$. Then, the final feature matrix is $\pmb{X} = [X_{\mathcal{S}_0}, X_{\mathrm{irr}}^1, \dots, X_{\mathrm{irr}}^{p_0}] \in \mathbb{R}^{n \times p}$, where
|
| 126 |
+
|
| 127 |
+
$\pmb{X}_{\mathrm{irr}}^{j} = (\pmb{x}_{j,\mathrm{irr}}^{1}, \dots, \pmb{x}_{j,\mathrm{irr}}^{s}) \in \mathbb{R}^{n \times s}$ is the irrelevant feature matrix induced by the $j$th relevant feature for $j = 1, \ldots, p_0$, totaling $p = p_0(1 + s)$ features. In Section 6.2, we fix $s = 50$ so the total number of features is $p = 51p_{0}$. Additional dataset information and sampling process are available in Appendix B.
|
| 128 |
+
|
| 129 |
+
Besides the 3,200 distinct simulation settings described in Table 1 (100 datasets, 8 SNRs, and 4 sample sizes), we include additional simulation settings in Appendix D.4 to further assess PAN+SR's behavior under alternative feature structures. These include (1) additive noise in features, (2) duplicated features, and (3) correlated features.
|
| 130 |
+
|
| 131 |
+
# 5.3. Metrics
|
| 132 |
+
|
| 133 |
+
Predictive Accuracy We assessed predictive accuracy using the coefficient of determination, defined as
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
R ^ {2} = 1 - \frac {\sum_ {i = 1} ^ {n} (y _ {i} - \widehat {y} _ {i}) ^ {2}}{\sum_ {i = 1} ^ {n} (y _ {i} - \bar {y}) ^ {2}}.
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+
Model Complexity In line with SRBench, we define model complexity as the total number of mathematical operators, features, and constants in the model. To avoid redundancy, symbolic models are first simplified using SymPy (Meurer et al., 2017), a Python library for symbolic mathematics.
|
| 140 |
+
|
| 141 |
+
Solution Criteria For ground-truth regression problems, we follow SRBench's definition of symbolic solution. A model $\widehat{f}(\mathbf{X})$ is considered a solution to the SR problem of $y = f_0(\mathbf{X}) + \varepsilon$ if $\widehat{f}(\mathbf{X})$ does not reduce to a constant and (1) $\widehat{f} - f_0 = a$ for some $a \in \mathbb{R}$ or (2) $\widehat{f} / f_0 = b$ for some $b \neq 0$ . That is, the predicted model $\widehat{f}$ only differs from the true model $f_0$ by either an additive or a multiplicative constant.
|
| 142 |
+
|
| 143 |
+
While predictive accuracy can be influenced by the simulation design, the symbolic solution criterion offers a more reliable metric for assessing whether an SR method can uncover the true data-generating process. However, since
|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
Figure 1: Results on the black-box regression problems. Points indicate the mean test set performance and bars represent the $95\%$ confidence intervals. Training time for PAN+SR includes the runtime of PAN, which averages only 74.14 seconds.
|
| 147 |
+
|
| 148 |
+
SymPy's simplification process is not always optimal, it is possible that some symbolic solutions are not identified in the process.
|
| 149 |
+
|
| 150 |
+
Feature Usage Accuracy The irrelevant features present a unique challenge for SR methods to identify the correct data-generating model $f_{0}$ . When the predictive model $\widehat{f}$ includes irrelevant features (FPs), it cannot be considered a symbolic solution to $f_{0}$ . Conversely, if $\widehat{f}$ excludes some relevant features (FNs), it also fails to meet the symbolic solution criteria. Although neither FPR nor FNR corresponds directly to symbolic solution rate, they can provide insights into why $\widehat{f}$ does not qualify as a symbolic solution.
|
| 151 |
+
|
| 152 |
+
# 6. Results
|
| 153 |
+
|
| 154 |
+
# 6.1. Blackbox Datasets
|
| 155 |
+
|
| 156 |
+
Figure 1 shows that PAN+SR consistently improves test set $R^2$ across 18 out of 19 SR algorithms, with the largest gains observed in lower-performing methods such as BSR, AIfeynman, and ITEA. For top-performing SR algorithms, the improvements are more modest due to the natural upper limit of $R^2$ , but the uplift remains significant. For instance, PAN boosted uDSR from 14th to 5th place in the overall ranking and to 2nd among the standalone SR methods. Furthermore, these $R^2$ improvements are not accompanied by increased model complexity. In some cases, PAN+SR even reduces model complexity, enhancing both parsimony and interpretability.
|
| 157 |
+
|
| 158 |
+
In addition to accuracy gain, PAN+SR significantly reduces training times for several SR algorithms, including SBP-GP, uDSR, AFP_FE, AIfeynman, and BSR. Notably, AIfeynman, the 2nd slowest running SR algorithm, achieves a 5-fold speedup (from 71250 seconds to 13997 seconds), while uDSR benefits from nearly a 3-fold speedup (from 7628 seconds to 2612 seconds) with PAN pre-screening. The computational overhead introduced by PAN is minimal, averaging only 74.14 seconds on a single core. As PAN relies on independent MCMC chains, this overhead can be further reduced through parallel processing, making PAN+SR both efficient and scalable.
|
| 159 |
+
|
| 160 |
+
# 6.2. Ground-truth Datasets
|
| 161 |
+
|
| 162 |
+
Figure 2 summarizes performance on the ground-truth regression problems with $n = 1000$, $\mathrm{SNR} = \infty$, and $s = 50$. Methods are sorted by their standalone $R^2$ on the test set. PAN+SR consistently improves both $R^2$ and solution rate across all 19 SR methods. Due to the high dimensionality of the ground-truth problems, the standalone AIfeynman encountered out-of-memory errors and failed to complete any of the 1000 runs. However, PAN significantly improves AIfeynman's performance, lifting it from last place to 2nd overall in symbolic solution rate. Furthermore, PAN consistently outperforms all other nonparametric variable selection methods tested, achieving a higher TPR than the four competing methods and delivering the best $R^2$ when paired with SR, as detailed in Appendix D.2. This underscores the effectiveness and necessity of nonparametric pre-screening in
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
Figure 2: Results on the ground truth regression problems with $n = 1000$ , $\mathrm{SNR} = \infty$ , and $s = 50$ . Points indicate the mean test set performance and bars represent the $95\%$ confidence intervals. Training time for $\mathrm{PAN} + \mathrm{SR}$ includes the runtime of PAN, which averages 325 seconds. AIfeynman fails to complete any run in the standalone setting.
|
| 166 |
+
|
| 167 |
+
high-dimensional SR problems.
|
| 168 |
+
|
| 169 |
+
Similar to our findings in the black-box regression problems, this performance gain is not driven by increased model size, and PAN's average computational overhead of 325 seconds remains insignificant to many SR methods. Remarkably, uDSR benefited from nearly a 6-fold speedup with PAN (from 9573 seconds to 1596 seconds) while almost doubling its solution rate (from $36.6\%$ to $71.8\%$ ), making it the best performer in solution rate. Additionally, PAN elevated several mid-tier performers such as Operon, AFP_FE, AFP, and EHC, enabling them to surpass the 4th place method, GP-GOMEA, in the standalone SR solution rate ranking.
|
| 170 |
+
|
| 171 |
+
Beyond the specific simulation setting of $n = 1000$ and $\mathrm{SNR} = \infty$ , we also investigated the sensitivity of $\mathrm{PAN} + \mathrm{SR}$ across a range of sample sizes and SNR. In particular, we evaluated $\mathrm{PAN} + \mathrm{SR}$ with all combinations of sample size $n \in \{500, 1000, 1500, 2000\}$ and $\mathrm{SNR} \in \{0.5, 1, 2, 5, 10, 15, 20, \infty\}$ . Given the extreme computational burden, we select Operon, the best-performing algorithm in black-box regression problems, to be the SR module for the sensitivity analysis.
|
| 172 |
+
|
| 173 |
+
Figure 3a demonstrates that both Operon and PAN+Operon maintain consistently lower FPR across all settings of $n$ and SNR, with negligible differences between them. This low FPR reflects the rare inclusion of irrelevant features in the final symbolic models. In noisy settings, we notice a significant increase in PAN's FPR, from $0\%$ at $\mathrm{SNR} = \infty$ to over $30\%$ at $\mathrm{SNR} = 0.5$ . While this noise sensitivity could
|
| 174 |
+
|
| 175 |
+
be a concern for typical variable selection applications, it is crucial to emphasize that PAN's primary objective is to scale up SR methods by reliably identifying a superset of the relevant features $S_0$ . In this context, minimizing FNs during pre-screening is more critical than avoiding FPs.
|
| 176 |
+
|
| 177 |
+
Figure 3b illustrates that PAN achieves a near $0\%$ FNR across most simulation settings, highlighting its ability to identify a superset of the true feature set $S_0$. This is crucial to ensure that the pre-screened dataset $(y, X_{\hat{S}})$ used for subsequent SR modeling is comprehensive enough to generate the correct expression $f_0$. However, in the most extreme case, where $n = 500$ and $\mathrm{SNR} = 0.5$, PAN's FNR rises to over $5\%$, and caution is advised when relying on PAN in such cases. On the other hand, the standalone Operon often fails to include all relevant features in its final models across all $n$ and SNR settings, while PAN consistently lowers Operon's FNR, enhancing its chance to identify the true function $f_0$. Even with PAN, Operon fails to achieve the best-case FNR set by PAN, particularly under noisy conditions. This elevated FNR negatively impacts Operon's solution rate. For example, when the SNR changes from $\infty$ to 10, $\mathrm{PAN + Operon}$'s average solution rate drops from $27.4\%$ to $0\%$, and Operon's solution rate falls from $18.1\%$ to $0\%$. As La Cava et al. (2021) noted, this limitation persists even when Operon is provided with only the relevant features $X_{S_0}$ and under favorable conditions ($n = 100{,}000$ and $\mathrm{SNR} = 100$), indicating that the issue lies beyond PAN pre-screening. Other performance metrics of this sensitivity analysis are available in Appendix D.5.
|
| 178 |
+
|
| 179 |
+

|
| 180 |
+
(a) False positive rate (FPR).
|
| 181 |
+
|
| 182 |
+

|
| 183 |
+
(b) False negative rate (FNR).
|
| 184 |
+
Figure 3: FPR and FNR of Operon, PAN+Operon, and PAN on the ground truth datasets. PAN refers to the proposed selection method in Section 4. Points indicate the mean performance and bars represent the $95\%$ confidence intervals.
|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
Figure 4: Results of selected methods on the ground truth problems with $n = 1000$ , $\mathrm{SNR} \in \{\infty, 10\}$ , and $s = 50$ . Points indicate the mean test set performance and bars represent the $95\%$ confidence intervals.
|
| 188 |
+
|
| 189 |
+
Beyond Operon, we also evaluated several top-performing SR methods on the ground-truth problems using $n = 1000$ and $\mathrm{SNR} \in \{\infty, 10\}$. As shown in Figure 4, $\mathrm{PAN} + \mathrm{SR}$ consistently improves SR methods across all SNR levels, though all SR methods and their PAN-boosted variants become less accurate at $\mathrm{SNR} = 10$, indicating the challenge when noise is present. In particular, GP-GOMEA performs similarly to Operon, with its solution rate dropping to $0\%$ at $\mathrm{SNR} = 10$ for both the standalone and PAN-boosted variants. The best-performing SR algorithm, uDSR, also exhibits vulnerability to noise, with its PAN-boosted solution rate falling from $71.8\%$ to $7.4\%$. Surprisingly, PAN significantly benefits
|
| 190 |
+
|
| 191 |
+
DSR, the weakest SR algorithm in Figure 4, increasing its solution rate from $8.2\%$ to $14.9\%$ at $\mathrm{SNR} = 10$ and from $8.9\%$ to $25.8\%$ at $\mathrm{SNR} = \infty$ . These findings highlight the fundamental challenges noise introduces to SR algorithms. To date, SR algorithms have been predominantly developed for noiseless or high-SNR settings, even for "small $p$ " problems. We expect that iterative application of the proposed variable selection method, similar to Ye et al. (2024), along with careful consideration of the challenges in extreme-scale SR, could improve performance in low-SNR settings. This will be explored in future work.
|
| 192 |
+
|
| 193 |
+
# 7. Discussion
|
| 194 |
+
|
| 195 |
+
In this paper, we introduce PAN+SR, a novel framework designed to address the scalability challenges faced by SR methods when applied to high-dimensional datasets. The growing prevalence of big data necessitates tools capable of efficiently handling such complexity, and PAN+SR addresses this need by integrating a nonparametric prescreening mechanism with SR. This integration enables the framework to focus the model search on a relevant subset of features, reducing computational burden and improving accuracy.
|
| 196 |
+
|
| 197 |
+
The core innovation of PAN+SR lies in its nonparametric variable selection method, which filters the input dataset to reduce dimensionality before applying SR. A key challenge in this process is minimizing the risk of false negatives (FNs), where relevant features are mistakenly excluded. Such omissions can critically impair SR methods, as the success of SR depends on having access to the true feature set. To address this issue, we developed a variable selection method designed to ensure that the selected features
|
| 198 |
+
|
| 199 |
+
form a superset of the true feature set, effectively minimizing the FNR. Our approach leverages the characteristics of VIP rankings derived from BART, providing a tuning-free, data-driven variable selection criterion capable of retaining relevant features while excluding irrelevant ones. By preserving a comprehensive set of candidate features, PAN+SR maximizes the likelihood of identifying the true underlying model.
|
| 200 |
+
|
| 201 |
+
We evaluated PAN+SR across a diverse set of datasets, including 35 high-dimensional real-world datasets from the PMLB database and 100 modified simulated datasets based on the Feynman Symbolic Regression Database. The results were highly promising: PAN+SR improved the performance of 18 out of 19 SR methods on real datasets and all 19 methods on simulated datasets when noise is absent. These findings underscore the framework's potential to enhance the robustness and scalability of SR methods across diverse datasets.
|
| 202 |
+
|
| 203 |
+
In addition, we explored the sensitivity of $\mathrm{PAN} + \mathrm{SR}$ to varying sample sizes and SNR. Our analysis demonstrated that the performance gains achieved by $\mathrm{PAN} + \mathrm{SR}$ are consistent across different sample sizes and remain robust in the presence of noise. Like our extended Feynman database, SRSD (Matsubara et al., 2024) augments the original Feynman database with irrelevant features, bringing the synthetic benchmarks closer to real-world scientific processes. However, SRSD adds only 1-3 irrelevant variables, while our setup introduces 100-450 irrelevant variables, posing a substantially more challenging test for both variable selection and symbolic regression. Nonetheless, SRSD rectifies several physical inconsistencies present in the original Feynman benchmark, such as a more realistic treatment of constants and integer-valued variables, and a more careful specification of sampling ranges. Our investigation extends beyond ground-truth datasets by incorporating black-box datasets, thereby mitigating, to some extent, the limitations inherent in purely simulated data. Still, we view SRSD as a valuable and complementary benchmark and plan to incorporate its refinements in future evaluations. In summary, $\mathrm{PAN} + \mathrm{SR}$ provides a significant step forward in enabling SR methods to handle the complexities of modern datasets, offering improved performance and scalability across a wide range of applications.
|
| 204 |
+
|
| 205 |
+
# Impact Statement
|
| 206 |
+
|
| 207 |
+
This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.
|
| 208 |
+
|
| 209 |
+
# References
|
| 210 |
+
|
| 211 |
+
Arnaldo, I., Krawiec, K., and O'Reilly, U.-M. Multiple regression genetic programming. In Proceedings of the 2014 Annual Conference on Genetic and Evolutionary Computation, GECCO '14, pp. 879-886, New York, NY, USA, 2014. Association for Computing Machinery.
|
| 212 |
+
Bleich, J., Kapelner, A., George, E. I., and Jensen, S. T. Variable selection for BART: an application to gene regulation. Annals of Applied Statistics, 8(3):1750-1781, 09 2014.
|
| 213 |
+
Burlacu, B., Kronberger, G., and Kommenda, M. Operon C++: an efficient genetic programming framework for symbolic regression. In Proceedings of the 2020 Genetic and Evolutionary Computation Conference Companion, GECCO '20, pp. 1562-1570, New York, NY, USA, 2020. Association for Computing Machinery.
|
| 214 |
+
Candès, E., Fan, Y., Janson, L., and Lv, J. Panning for Gold: 'Model-X' Knockoffs for High Dimensional Controlled Variable Selection. Journal of the Royal Statistical Society Series B: Statistical Methodology, 80(3):551-577, 01 2018.
|
| 215 |
+
Chipman, H. A., George, E. I., and McCulloch, R. E. BART: Bayesian additive regression trees. Annals of Applied Statistics, 4(1):266-298, 03 2010.
|
| 216 |
+
Cranmer, M. Interpretable Machine Learning for Science with PySR and SymbolicRegression.jl. arXiv:2305.01582, 2023.
|
| 217 |
+
de Franca, F. O. and Aldeia, G. S. I. Interaction-transformation evolutionary algorithm for symbolic regression. Evolutionary Computation, 29(3):367-390, 09 2021.
|
| 218 |
+
Derner, E., Kubalík, J., Ancona, N., and Babuška, R. Constructing parsimonious analytic models for dynamic systems via symbolic regression. Applied Soft Computing, 94:106432, 2020.
|
| 219 |
+
Dick, G. Genetic programming, standardisation, and stochastic gradient descent revisited: initial findings on srbench. In Proceedings of the Genetic and Evolutionary Computation Conference Companion, GECCO '22, pp. 2265-2273, New York, NY, USA, 2022. Association for Computing Machinery.
|
| 220 |
+
Fan, J., Feng, Y., and Song, R. Nonparametric independence screening in sparse ultra-high-dimensional additive models. Journal of the American Statistical Association, 106(494):544-557, 2011.
|
| 221 |
+
Feynman, R. P., Leighton, R. B., and Sands, M. The Feynman Lectures on Physics. Basic Books, New York, NY, 2010.
|
| 222 |
+
|
| 223 |
+
Friedman, J. H. Multivariate Adaptive Regression Splines. The Annals of Statistics, 19(1):1-67, 1991.
|
| 224 |
+
Hernandez, A., Balasubramanian, A., Yuan, F., Mason, S. A. M., and Mueller, T. Fast, accurate, and transferable many-body interatomic potentials by symbolic regression. npj Computational Materials, 5(1):112, November 2019.
|
| 225 |
+
Jin, Y., Fu, W., Kang, J., Guo, J., and Guo, J. Bayesian Symbolic Regression. arXiv:1910.08892, 2020.
|
| 226 |
+
Kamienny, P.-a., d'Ascoli, S., Lample, G., and Charton, F. End-to-end symbolic regression with transformers. In Koyejo, S., Mohamed, S., Agarwal, A., Belgrave, D., Cho, K., and Oh, A. (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 10269-10281. Curran Associates, Inc., 2022.
|
| 227 |
+
Kamienny, P.-A., Lample, G., Lamprier, S., and Virgolin, M. Deep generative symbolic regression with Monte-Carlo-tree-search. In Proceedings of the 40th International Conference on Machine Learning, ICML'23, pp. 15655-15668. JMLR.org, 2023.
|
| 228 |
+
Kelly, M., Longjohn, R., and Nottingham, K. The UCI Machine Learning Repository, 2013.
|
| 229 |
+
Keren, L. S., Liberzon, A., and Lazebnik, T. A computational framework for physics-informed symbolic regression with straightforward integration of domain knowledge. Scientific Reports, 13(1):1249, January 2023.
|
| 230 |
+
Kronberger, G., Kommenda, M., Promberger, A., and Nickel, F. Predicting friction system performance with symbolic regression and genetic programming with factor variables. In Proceedings of the Genetic and Evolutionary Computation Conference, GECCO '18, pp. 1278-1285, New York, NY, USA, 2018. Association for Computing Machinery.
|
| 231 |
+
La Cava, W., Spector, L., Danai, K., and Lackner, M. Evolving differential equations with developmental linear genetic programming and epigenetic hill climbing. In Proceedings of the Companion Publication of the 2014 Annual Conference on Genetic and Evolutionary Computation, GECCO Comp '14, pp. 141-142, New York, NY, USA, 2014. Association for Computing Machinery.
|
| 232 |
+
La Cava, W., Helmuth, T., Spector, L., and Moore, J. H. A Probabilistic and Multi-Objective Analysis of Lexicase Selection and $\varepsilon$ -Lexicase Selection. Evolutionary Computation, 27(3):377-402, September 2019a.
|
| 233 |
+
La Cava, W., Singh, T. R., Taggart, J., Suri, S., and Moore, J. Learning concise representations for regression by evolving networks of trees. In International Conference on Learning Representations, 2019b.
|
| 234 |
+
|
| 235 |
+
La Cava, W., Orzechowski, P., Burlacu, B., de Franca, F., Virgolin, M., Jin, Y., Kommenda, M., and Moore, J. Contemporary symbolic regression methods and their relative performance. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021.
|
| 236 |
+
Lafferty, J. and Wasserman, L. Rodeo: Sparse, greedy nonparametric regression. The Annals of Statistics, 36(1): 28-63, 2008.
|
| 237 |
+
Landajuela, M., Lee, C. S., Yang, J., Glatt, R., Santiago, C. P., Aravena, I., Mundhenk, T., Mulcahy, G., and Petersen, B. K. A unified framework for deep symbolic regression. In Koyejo, S., Mohamed, S., Agarwal, A., Belgrave, D., Cho, K., and Oh, A. (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 33985-33998. Curran Associates, Inc., 2022.
|
| 238 |
+
Lemos, P., Jeffrey, N., Cranmer, M., Ho, S., and Battaglia, P. Rediscovering orbital mechanics with machine learning. Machine Learning: Science and Technology, 4(4):045002, October 2023.
|
| 239 |
+
Li, W., Li, W., Yu, L., Wu, M., Sun, L., Liu, J., Li, Y., Wei, S., Yusong, D., and Hao, M. A neural-guided dynamic symbolic network for exploring mathematical expressions from data. In Salakhutdinov, R., Kolter, Z., Heller, K., Weller, A., Oliver, N., Scarlett, J., and Berkenkamp, F. (eds.), Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 28222-28242. PMLR, 21-27 Jul 2024.
|
| 240 |
+
Liu, C.-Y., Zhang, S., Martinez, D., Li, M., and Senftle, T. P. Using statistical learning to predict interactions between single metal atoms and modified MgO (100) supports. npj Computational Materials, 6(1):102, 2020.
|
| 241 |
+
Liu, C.-Y., Ye, S., Li, M., and Senftle, T. P. A rapid feature selection method for catalyst design: Iterative Bayesian additive regression trees (iBART). The Journal of Chemical Physics, 156(16), 2022.
|
| 242 |
+
Liu, Y., Ročková, V., and Wang, Y. Variable selection with ABC Bayesian forests. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 83(3):453-481, 04 2021.
|
| 243 |
+
Makke, N. and Chawla, S. Interpretable scientific discovery with symbolic regression: a review. Artificial Intelligence Review, 57(1):2, January 2024.
|
| 244 |
+
Märtens, M. and Izzo, D. Symbolic regression for space applications: Differentiable cartesian genetic programming powered by multi-objective memetic algorithms. arXiv:2206.06213, 2022.
|
| 245 |
+
|
| 246 |
+
Matsubara, Y., Chiba, N., Igarashi, R., and Ushiku, Y. Rethinking symbolic regression datasets and benchmarks for scientific discovery. Journal of Data-centric Machine Learning Research, 2024.
|
| 247 |
+
McConaghy, T. FFX: Fast, Scalable, Deterministic Symbolic Regression Technology, pp. 235-260. Springer New York, New York, NY, 2011.
|
| 248 |
+
Meurer, A., Smith, C. P., Paprocki, M., Čertík, O., Kirpichev, S. B., Rocklin, M., Kumar, A., Ivanov, S., Moore, J. K., Singh, S., Rathnayake, T., Vig, S., Granger, B. E., Muller, R. P., Bonazzi, F., Gupta, H., Vats, S., Johansson, F., Pedregosa, F., Curry, M. J., Terrel, A. R., Roučka, Š., Saboo, A., Fernando, I., Kulal, S., Čimrman, R., and Scopatz, A. SymPy: symbolic computing in Python. PeerJ Computer Science, 3:e103, January 2017.
|
| 249 |
+
Petersen, B. K., Larma, M. L., Mundhenk, T. N., Santiago, C. P., Kim, S. K., and Kim, J. T. Deep symbolic regression: Recovering mathematical expressions from data via risk-seeking policy gradients. In International Conference on Learning Representations, 2021.
|
| 250 |
+
Romano, J. D., Le, T. T., La Cava, W., Gregg, J. T., Goldberg, D. J., Chakraborty, P., Ray, N. L., Himmelstein, D., Fu, W., and Moore, J. H. PMLB v1.0: an open-source dataset collection for benchmarking machine learning methods. Bioinformatics, 38(3):878-880, October 2021.
|
| 251 |
+
Schmidt, M. D. and Lipson, H. Age-fitness pareto optimization. In Proceedings of the 12th Annual Conference on Genetic and Evolutionary Computation, GECCO '10, pp. 543-544, New York, NY, USA, 2010. Association for Computing Machinery.
|
| 252 |
+
Shojaee, P., Meidani, K., Barati Farimani, A., and Reddy, C. Transformer-based planning for symbolic regression. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S. (eds.), Advances in Neural Information Processing Systems, volume 36, pp. 45907-45919. Curran Associates, Inc., 2023.
|
| 253 |
+
Stephens, T. gplearn: Genetic Programming in Python. https://github.com/trevorstephens/gplearn, 2020.
|
| 254 |
+
Strogatz, S. H. Nonlinear Dynamics and Chaos: With Applications to Physics, Biology, Chemistry, and Engineering. CRC Press, 2015.
|
| 255 |
+
Tenachi, W., Ibata, R., and Diakogiannis, F. I. Deep symbolic regression for physics guided by units constraints: Toward the automated discovery of physical laws. The Astrophysical Journal, 959(2):99, December 2023.
|
| 256 |
+
Udrescu, S.-M. and Tegmark, M. AI Feynman: A physics-inspired method for symbolic regression. Science Advances, 6(16):eaay2631, 2020.
|
| 257 |
+
|
| 258 |
+
Udrescu, S.-M., Tan, A., Feng, J., Neto, O., Wu, T., and Tegmark, M. AI Feynman 2.0: Pareto-optimal symbolic regression exploiting graph modularity. In Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., and Lin, H. (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 4860-4871. Curran Associates, Inc., 2020.
|
| 259 |
+
Vanschoren, J., van Rijn, J. N., Bischl, B., and Torgo, L. Openml: networked science in machine learning. SIGKDD Explor. Newsl., 15(2):49-60, June 2014.
|
| 260 |
+
Verstyuk, S. and Douglas, M. R. Machine learning the gravity equation for international trade. Available at SSRN 4053795, 2022.
|
| 261 |
+
Virgolin, M. and Pissis, S. P. Symbolic regression is NP-hard. Transactions on Machine Learning Research, 2022.
|
| 262 |
+
Virgolin, M., Alderliesten, T., and Bosman, P. A. N. Linear scaling with and within semantic backpropagation-based genetic programming for symbolic regression. In Proceedings of the Genetic and Evolutionary Computation Conference, GECCO '19, pp. 1084-1092, New York, NY, USA, 2019. Association for Computing Machinery.
|
| 263 |
+
Virgolin, M., Wang, Z., Alderliesten, T., and Bosman, P. A. N. Machine learning for the prediction of pseudorealistic pediatric abdominal phantoms for radiation dose reconstruction. Journal of Medical Imaging, 7(4):046501, 2020.
|
| 264 |
+
Virgolin, M., Alderliesten, T., Witteveen, C., and Bosman, P. A. N. Improving Model-Based Genetic Programming for Symbolic Regression of Small Expressions. Evolutionary Computation, 29(2):211-237, 06 2021.
|
| 265 |
+
Wu, T. and Tegmark, M. Toward an artificial intelligence physicist for unsupervised learning. Phys. Rev. E, 100: 033311, September 2019.
|
| 266 |
+
Ye, S., Senftle, T. P., and Li, M. Operator-Induced Structural Variable Selection for Identifying Materials Genes. Journal of the American Statistical Association, 119(545): 81-94, 2024.
|
| 267 |
+
|
| 268 |
+
# A. Reproducing the Experiment
|
| 269 |
+
|
| 270 |
+
The experiment made use of an existing symbolic regression (SR) benchmarking platform, SRBench (La Cava et al., 2021), and changes were made to facilitate other functionalities, including signal-to-noise ratio (SNR) tuning, feature pre-screening, and variable usage accuracy calculation. The README file in our GitHub repository https://github.com/mattsheng/PAN_SR details the complete set of commands for reproducing the experiment. Here, we provide a short summary of the experiment process. Experiments are launched from the experiments/ folder via the script analyze.py. After installing and configuring the conda environment provided by SRBench, the complete black-box experiment on standalone SR methods can be started via the following command:
|
| 271 |
+
|
| 272 |
+
```shell
|
| 273 |
+
python analyze.py /path/to/pmlb/
|
| 274 |
+
2 -results ./results/blackbox/SR/
|
| 275 |
+
3 -n_trials 10
|
| 276 |
+
4 -time_limit 24:00
|
| 277 |
+
5 -tuned -skip_tuning
|
| 278 |
+
```
|
| 279 |
+
|
| 280 |
+
To enable PAN pre-screening, the users can either specify the path to a pre-run variable selection result or run the prescreening in place. The first option is useful when the users need to compare different SR methods on the same dataset:
|
| 281 |
+
|
| 282 |
+
```shell
|
| 283 |
+
python analyze.py /path/to/pmlb \
|
| 284 |
+
-results ../results/blackbox/SR_BART_VIP \
|
| 285 |
+
-n_trials 10 \
|
| 286 |
+
-time_limit 24:00 \
|
| 287 |
+
-vs_method BART_VIP \
|
| 288 |
+
-vs_result_path ../results/blackbox/pmlb_BART_VIP_withidx.feather \
|
| 289 |
+
-vs_idx_label idx_hclst \
|
| 290 |
+
-tuned -skip_tuning
|
| 291 |
+
```
|
| 292 |
+
|
| 293 |
+
If no path is given to -vs_result_path, the PAN pre-screening will be run in place. Similarly, the ground-truth experiment for the standalone SR methods on Feynman datasets with a sample size of $n = 1000$ and an SNR of 10 can be run by the following command:
|
| 294 |
+
|
| 295 |
+
```shell
|
| 296 |
+
python analyze.py /path/to/feynman \
|
| 297 |
+
2 -results ../results_feynman/SR \
|
| 298 |
+
3 -signal_to_noise 10 \
|
| 299 |
+
4 -n 1000 \
|
| 300 |
+
5 -sym_data \
|
| 301 |
+
6 -n_trials 10 \
|
| 302 |
+
7 -time_limit 24:00 \
|
| 303 |
+
8 -tuned -skip_tuning
|
| 304 |
+
```
|
| 305 |
+
|
| 306 |
+
Note that -sym_data enables more performance metric calculations only available for ground-truth problems. To run PAN pre-screening only on the Feynman datasets with a sample size of $n = 1000$ and an SNR of 10, we can use the following command:
|
| 307 |
+
|
| 308 |
+
```shell
|
| 309 |
+
python analyze.py /path/to/feynman \
|
| 310 |
+
2 -script BART_selection \
|
| 311 |
+
3 -ml BART_VIP \
|
| 312 |
+
4 -results ./results_feynman/BART_VIP/n_1000/ \
|
| 313 |
+
5 -signal_to_noise 10 \
|
| 314 |
+
6 -n 1000 \
|
| 315 |
+
7 -sym_data \
|
| 316 |
+
8 -n_trials 10 \
|
| 317 |
+
9 -rep 20 \
|
| 318 |
+
10 -time_limit 24:00
|
| 319 |
+
```
|
| 320 |
+
|
| 321 |
+
The -rep 20 argument instructs the program to run $K = 20$ replications of BART for estimating the variable ranking $r_{jk}$ of the $j$ th feature at the $k$ th run. Users can use other variable selection methods by modifying the BART_selection.py script.
|
| 322 |
+
|
| 323 |
+
# B. Additional Dataset Information
|
| 324 |
+
|
| 325 |
+
PMLB datasets Black-box datasets and their metadata are available from PMLB under an MIT license and are described in detail in Romano et al. (2021). In this experiment, we only focus on high-dimensional regression datasets available from PMLB. Specifically, we use PMLB regression datasets satisfying the following criteria:
|
| 326 |
+
|
| 327 |
+
1. $n < 200$ and $p\geq 10$ , or
|
| 328 |
+
2. $n\geq 200$ and $p\geq 20$
|
| 329 |
+
|
| 330 |
+
Furthermore, datasets that have categorical features (number of unique values $\leq 5$ ) or a non-continuous response variable (proportion of unique values $< 0.9$ ) are excluded since they are incorrectly classified as regression tasks (Dick, 2022). Among the datasets meeting these criteria, we found that two datasets, 195_auto_price and 207_autoprice, are identical, and we only kept 195_auto_price in our analysis. See Dick (2022) for a detailed analysis of the dataset duplication and incorrect problem classification issues of PMLB.
|
| 331 |
+
|
| 332 |
+
Feynman datasets The original Feynman database described in Udrescu and Tegmark (2020) consists of only the relevant features $X_{S_0}$ and a large sample size of $n = 10^5$ , and is available in Feynman Symbolic Regression Database (https://space.mit.edu/home/tegmark/aifeynman.html). We extended the Feynman Symbolic Regression Database to include irrelevant features $X_{\mathrm{irr}}^{j} \in \mathbb{R}^{n \times s}$ for each relevant feature $x_j$ , $j \in S_0$ . To take advantage of the SRBench platform, we standardized the Feynman equations to PMLB format and included metadata detailing the true model and the units of each variable. The extended Feynman datasets are generated using the Python script provided in feynman_dataset_code/generate_feynman_dataset.py. To avoid the need to generate different datasets for each sample size $n$ considered in the main paper, we set $s = 50$ and $n = 100,000$ for all Feynman equations with random state control; we refer to this as the full Feynman datasets. In the experiment, the full Feynman datasets are randomly split into a $75\% / 25\%$ train/test set. If the train set contains more samples than the desired training sample size $n$ , the train and test sets will be further subsampled so that $X_{\mathrm{train}}$ has exactly $n$ samples and $X_{\mathrm{test}}$ has exactly $\lfloor n / 3 \rfloor$ samples.
|
| 333 |
+
|
| 334 |
+
Users can also generate datasets using other data-generating functions $f_{0}$ by supplying a CSV file with the expression of $f_{0}(\cdot)$ and an additional CSV file describing the desired uniform distribution (i.e., the lower and upper bounds of the distribution) of each variable in $f_{0}(\cdot)$ . See feynman_dataset_code/FeynmanEquations.csv and feynman_dataset_code/units.csv for more details.
|
| 335 |
+
|
| 336 |
+
Sampling Process for Extended Feynman Datasets The sampling process for the extended Feynman datasets is described in the main text and is reproduced here for completeness of the data description in this section.
|
| 337 |
+
|
| 338 |
+
For each equation $f_0(\cdot)$ in the Feynman Lectures on Physics, we generated the relevant features $X_{S_0}$ following Udrescu and Tegmark (2020):
|
| 339 |
+
|
| 340 |
+
$$
|
| 341 |
+
\left(x_{1,j}, \dots, x_{n,j}\right) \stackrel{\text{iid}}{\sim} \operatorname{Unif}\left(a_{j}, b_{j}\right), \quad \text{for } 1 \leq j \leq p_{0}, \tag{6}
|
| 342 |
+
$$
|
| 343 |
+
|
| 344 |
+
where $p_0 = |\mathcal{S}_0|$ is the number of relevant features, $n$ is the sample size, and $a_j$ and $b_j$ are the lower and upper bounds for feature $x_j$ described in https://space.mit.edu/home/tegmark/aifeynman/FeynmanEquations.csv. Then, the response variable is generated as follows:
|
| 345 |
+
|
| 346 |
+
$$
|
| 347 |
+
y_{i} = f_{0}\left(x_{i,1}, \dots, x_{i,p_{0}}\right) + \varepsilon_{i}, \quad \text{for } 1 \leq i \leq n, \tag{7}
|
| 348 |
+
$$
|
| 349 |
+
|
| 350 |
+
where $\varepsilon_{i}\stackrel{\mathrm{iid}}{\sim}N(0,\sigma_{\varepsilon}^{2})$ is an additive Gaussian error, $\sigma_f^2$ denotes the sample variance of $f_0(\cdot)$ , and $\sigma_{\varepsilon}^{2} = \sigma_{f}^{2} / \mathrm{SNR}$ is the error variance tuned to a prescribed signal-to-noise ratio (SNR). When $\sigma_{\varepsilon}^{2} = 0$ (i.e., $\mathrm{SNR} = \infty$ ), (6) and (7) generate the original Feynman Symbolic Regression Database.
|
| 351 |
+
|
| 352 |
+
For each relevant feature $\boldsymbol{x}_j, j = 1, \ldots, p_0$ , we generate $s = 50$ copies of irrelevant features following the distribution of $\boldsymbol{x}_j$ : $(\boldsymbol{x}_{j,\mathrm{irr}}^1, \ldots, \boldsymbol{x}_{j,\mathrm{irr}}^s) \stackrel{\mathrm{iid}}{\sim} \mathrm{Unif}(a_j, b_j)$ . Then, the final feature matrix is $\boldsymbol{X} = [X_{\mathcal{S}_0}, X_{\mathrm{irr}}^1, \ldots, X_{\mathrm{irr}}^{p_0}] \in \mathbb{R}^{n \times p}$ , where $\boldsymbol{X}_{\mathrm{irr}}^j = (\boldsymbol{x}_{j,\mathrm{irr}}^1, \ldots, \boldsymbol{x}_{j,\mathrm{irr}}^s) \in \mathbb{R}^{n \times s}$ is the irrelevant feature matrix induced by the $j$ th relevant feature for $j = 1, \ldots, p_0$ , totaling $p = p_0(1 + s)$ number of features.
|
| 353 |
+
|
| 354 |
+
In Appendix D.4, we consider sampling processes where features are not iid sampled from a uniform distribution.
|
| 355 |
+
|
| 356 |
+
# C. Additional Experiment Details
|
| 357 |
+
|
| 358 |
+
# C.1. General Experiment Settings
|
| 359 |
+
|
| 360 |
+
Experiments were run in a heterogeneous cluster composed of nodes with Intel(R) Xeon(R) CPU E5-2620 v2 @ 2.60GHz, Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz, Intel(R) Xeon(R) Gold 6126 CPU @ 2.60GHz, Intel(R) Xeon(R) Gold 6230 CPU @ 2.10GHz, and AMD EPYC 7642 CPU @ 2.3GHz processors. The training of a single method on a single dataset for a fixed random seed was considered a job. Each job was managed by SLURM Workload Manager to receive one CPU core, 16GB of RAM, and a time limit of 24 hours. For the ground-truth problems, each final model was given an additional 5 minutes for each of the following steps: 1) cleaning the model for SymPy parsing, 2) simplifying the cleaned model using SymPy, 3) checking the difference solution criterion of the simplified model, 4) checking the ratio solution criterion of the simplified model, and 5) calculating model size (complexity). When the simplification of the cleaned model exceeded the 5-minute wall clock, steps 3-5 were run on the cleaned model instead.
|
| 361 |
+
|
| 362 |
+
# C.2. Implementation Details of the Proposed Variable Selection Method
|
| 363 |
+
|
| 364 |
+
The proposed method uses the bartMachine R package for its BART implementation. For each dataset, we fit $K = 20$ independent BART models and record the ranking $r_{j,k}$ of variable $x_{j}$ 's variable inclusion proportion (VIP) in the $k$ th run; the hyperparameters for bartMachine are summarized in Table 2. To cluster the VIP rankings into 2 clusters, we use the hclust function in R to perform agglomerative clustering (unweighted pair group method with arithmetic mean) on the Euclidean dissimilarity matrix of the VIP rankings. Then, $x_{j}$ is selected if $\bar{r}_{j} = \sum_{k=1}^{K} r_{j,k} / K$ belongs to the low-mean cluster.
|
| 365 |
+
|
| 366 |
+
Table 2: Hyperparameters in bartMachine.
|
| 367 |
+
|
| 368 |
+
<table><tr><td>Parameter</td><td>Value</td></tr><tr><td># of trees</td><td>20</td></tr><tr><td># of burn-in samples</td><td>10,000</td></tr><tr><td># of posterior samples</td><td>10,000</td></tr></table>
|
| 369 |
+
|
| 370 |
+
# D. Additional Results
|
| 371 |
+
|
| 372 |
+
# D.1. Visualization of Average VIP Rankings $\bar{r}_j$ .
|
| 373 |
+
|
| 374 |
+
Figure 5 shows the average BART VIP rankings for Feynman equation I-38-12 with $n = 1000$ . At high SNR, there is a clear separation between the low- and high-mean clusters, and the hypothesized cluster means closely match their actual values. As SNR decreases, irrelevant features tend to receive higher rankings, slightly shifting the cluster means and incurring more false positives (FPs). Despite this deviation, the cluster means remain far apart, ensuring separation between relevant and irrelevant features.
|
| 375 |
+
|
| 376 |
+
Figure 6 further demonstrates the clustering accuracy of the proposed method. Regardless of the SNR level, all true features are consistently assigned to the low-mean cluster, which is highly desirable in the PAN+SR framework. While decreasing SNR leads to some misclassification of the irrelevant features, the proposed method ensures that no true features are excluded. This robustness in retaining the true features under varying noise levels makes the proposed method well-suited for PAN+SR framework and high-dimensional SR tasks.
|
| 377 |
+
|
| 378 |
+

|
| 379 |
+
|
| 380 |
+

|
| 381 |
+
|
| 382 |
+

|
| 383 |
+
|
| 384 |
+

|
| 385 |
+
|
| 386 |
+

|
| 387 |
+
|
| 388 |
+

|
| 389 |
+
|
| 390 |
+

|
| 391 |
+
Average VIP Ranking Cluster low-mean high-mean
|
| 392 |
+
|
| 393 |
+

|
| 394 |
+
Figure 5: Average BART VIP rankings $\bar{r}_j$ over $K = 20$ runs on Feynman equation I-38-12 with $n = 1000$ , $p_0 = 4$ , and $p = 204$ . Black vertical dashed lines indicate the cluster means. Red solid vertical lines are the hypothesized cluster means: $(1 + p_0) / 2 = 2.5$ and $(p_0 + 1 + p) / 2 = 104.5$ .
|
| 395 |
+
|
| 396 |
+

|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
|
| 400 |
+

|
| 401 |
+
|
| 402 |
+

|
| 403 |
+
|
| 404 |
+

|
| 405 |
+
|
| 406 |
+

|
| 407 |
+
|
| 408 |
+

|
| 409 |
+
True Label
|
| 410 |
+
|
| 411 |
+

|
| 412 |
+
Irrelevant Feature
|
| 413 |
+
|
| 414 |
+

|
| 415 |
+
Figure 6: Hierarchical clustering accuracy on Feynman equation I-38-12 with $n = 1000$ , $p_0 = 4$ , and $p = 204$ . Red and teal represent the low- and high-mean clusters, respectively. Circles and triangles represent relevant and irrelevant features, respectively.
|
| 416 |
+
|
| 417 |
+

|
| 418 |
+
True Feature
|
| 419 |
+
|
| 420 |
+

|
| 421 |
+
low-mean
|
| 422 |
+
|
| 423 |
+

|
| 424 |
+
high-mean
|
| 425 |
+
|
| 426 |
+
# D.2. Analysis of Different Nonparametric Variable Selection Methods
|
| 427 |
+
|
| 428 |
+

|
| 429 |
+
Figure 7: True positive rate (TPR) on the Feynman datasets for $n = 500,1000,1500,2000$ and $\mathrm{SNR} = \infty, 20, 15, 10, 5, 2, 1, 0.5$ . Points indicate the mean performance, and bars show the $95\%$ confidence interval. VIP Rank is the proposed method for PAN pre-screening. Local, G.SE, G.MAX, and RF are alternative nonparametric variable selection methods.
|
| 430 |
+
|
| 431 |
+
PAN pre-screening presents a unique challenge to nonparametric variable selection methods where any missed signals (false negative) will eliminate the correct expression $f_0(\cdot)$ from the search space. That is, a true positive rate (TPR) near $100\%$ in the pre-screening phase is necessary to ensure successful SR tasks. Figure 7 compares the average TPR of five nonparametric variable selection methods across various configurations of $n$ and SNR on the Feynman datasets. VIP Rank, the proposed method, is compared with three BART permutation test-based methods (Local, G.SE, and G.MAX) (Bleich et al., 2014) and the Random Forest (RF) variable selection method in PySR (Cranmer, 2023). Of the three BART permutation test-based methods, BART-Local applies the least stringent selection criteria, while BART-G.MAX is the most stringent, with BART-G.SE offering a balance between the two. The RF implementation requires users to specify the number of selected variables $k$ , which we tuned over $\{1,2,\dots,20\}$ using 5-fold cross-validation.
|
| 432 |
+
|
| 433 |
+
VIP Rank consistently achieves the highest TPR, nearing or reaching $100\%$ across all experimental settings. In noiseless conditions $(\mathrm{SNR} = \infty)$ , only VIP Rank attains a perfect TPR of $100\%$ . Although there is a slight TPR decline for VIP Rank at $n = 500$ and $\mathrm{SNR} \leq 5$ , it still outperforms the other methods, particularly at $n = 500$ and $\mathrm{SNR} = 0.5$ . These results reinforce the need for a specialized variable selection method for PAN pre-screening. In addition to the four methods considered here, we point readers to Ye et al. (2024), where they analyzed three additional nonparametric variable selection methods and showed that none outperform BART-G.SE in terms of TPR.
|
| 434 |
+
|
| 435 |
+
Figure 8 illustrates the false positive rate (FPR), a crucial metric for evaluating variable selection accuracy. As discussed in the main paper, VIP Rank produces higher FPR under low SNR conditions—a tradeoff made to maintain a near-perfect true positive rate (TPR). While this tradeoff may be undesirable for typical variable selection tasks, it is acceptable for PAN pre-screening, where minimizing false negatives (FNs) is the priority. The three BART permutation-based methods and RF consistently maintain low and robust FPRs across all settings of $n$ and SNR. However, as Figure 7 shows, this strict control of FPR comes at the cost of worse TPR performance.
|
| 436 |
+
|
| 437 |
+
To further evaluate the impact of variable selection methods in the PAN+SR framework, we replaced VIP Rank with BART-G.SE and compared their performance using Operon as the SR method. Operon was chosen for this analysis due to
|
| 438 |
+
|
| 439 |
+

|
| 440 |
+
Figure 8: False positive rate (FPR) on the Feynman datasets for $n = 500,1000,1500,2000$ and $\mathrm{SNR} = \infty, 20, 15, 10, 5, 2, 1, 0.5$ . Points indicate the mean performance, and bars show the $95\%$ confidence interval. VIP Rank is the proposed method for PAN pre-screening. Local, G.SE, G.MAX, and RF are alternative nonparametric variable selection methods.
|
| 441 |
+
|
| 442 |
+
its strong $R^2$ performance in both the black-box and ground-truth experiments. Table 3 summarizes the average test set $R^2$ on the Feynman dataset. VIP+SR consistently achieves the highest $R^2$ across all experimental settings. For instance, at $n = 500$ and $\mathrm{SNR} = 20$ , VIP+SR achieves an average $R^2$ of 0.892, compared to 0.860 for GSE+SR and 0.846 for standalone SR. Under high noise conditions, VIP+SR continues to demonstrate better robustness than GSE+SR. At $n = 500$ and $\mathrm{SNR} = 0.5$ , VIP+SR scores 0.145, slightly outperforming GSE+SR (0.142) and standalone SR (0.142). This trend is consistent across different sample sizes $n$ .
|
| 443 |
+
|
| 444 |
+
# D.3. Effect of Different Clustering Algorithms
|
| 445 |
+
|
| 446 |
+
The proposed VIP Rank variable selection method can be implemented using various off-the-shelf clustering algorithms. However, due to the class imbalance nature of the variable selection problem, not all clustering algorithms are suitable. In this ablation study, we examine the effect of clustering algorithms on TPR and FPR performances of VIP Rank. We selected 10 clustering algorithms available in scikit-learn v1.5.7: agglomerative hierarchical clustering (AHC), k-means++, Gaussian mixture model (GMM), Birch, Mean Shift, Affinity Propagation, Spectral, OPTICS, HDBSCAN, and DBSCAN.
|
| 447 |
+
|
| 448 |
+
As illustrated in Figure 9, the first 5 clustering algorithms (AHC, k-means++, GMM, Birch, Mean Shift) achieve the highest TPR across all simulation settings with indistinguishable differences. Affinity Propagation also has similar TPR compared with the top 5 algorithms but lags behind in noisy (e.g., $\mathrm{SNR} = 0.5$ ) and small- $n$ (e.g., $n = 500$ ) settings. The rest of the pack have significantly worse TPR and are thus not suitable for VIP Rank.
|
| 449 |
+
|
| 450 |
+
Since the top 5 algorithms have indistinguishable TPR, we select the one with the lowest FPR. As shown in Figure 10, AHC has significantly lower FPR than the rest of the top 5 algorithms across most simulation settings. Combined with its near $100\%$ TPR, AHC is capable of identifying a more compact feature set that has a high probability of containing all relevant features.
|
| 451 |
+
|
| 452 |
+

|
| 453 |
+
Figure 9: True positive rate of various ablations of clustering algorithm.
|
| 454 |
+
|
| 455 |
+

|
| 456 |
+
Figure 10: False positive rate of various ablations of clustering algorithm.
|
| 457 |
+
|
| 458 |
+
Table 3: Average test set $R^2$ . The highest value in each experimental setting is in bold.
|
| 459 |
+
|
| 460 |
+
<table><tr><td></td><td>noiseless</td><td>20</td><td>15</td><td>10</td><td>5</td><td>2</td><td>1</td><td>0.5</td></tr><tr><td colspan="9">n = 500</td></tr><tr><td>VIP+SR</td><td>0.974</td><td>0.892</td><td>0.870</td><td>0.837</td><td>0.730</td><td>0.525</td><td>0.335</td><td>0.145</td></tr><tr><td>GSE+SR</td><td>0.948</td><td>0.860</td><td>0.859</td><td>0.818</td><td>0.710</td><td>0.510</td><td>0.327</td><td>0.142</td></tr><tr><td>SR</td><td>0.915</td><td>0.846</td><td>0.840</td><td>0.792</td><td>0.702</td><td>0.506</td><td>0.322</td><td>0.142</td></tr><tr><td colspan="9">n = 1000</td></tr><tr><td>VIP+SR</td><td>0.984</td><td>0.919</td><td>0.901</td><td>0.867</td><td>0.774</td><td>0.586</td><td>0.406</td><td>0.229</td></tr><tr><td>GSE+SR</td><td>0.971</td><td>0.914</td><td>0.897</td><td>0.851</td><td>0.774</td><td>0.574</td><td>0.405</td><td>0.229</td></tr><tr><td>SR</td><td>0.942</td><td>0.883</td><td>0.867</td><td>0.825</td><td>0.747</td><td>0.580</td><td>0.393</td><td>0.227</td></tr><tr><td colspan="9">n = 1500</td></tr><tr><td>VIP+SR</td><td>0.990</td><td>0.928</td><td>0.909</td><td>0.874</td><td>0.792</td><td>0.612</td><td>0.433</td><td>0.260</td></tr><tr><td>GSE+SR</td><td>0.961</td><td>0.910</td><td>0.899</td><td>0.866</td><td>0.781</td><td>0.600</td><td>0.428</td><td>0.257</td></tr><tr><td>SR</td><td>0.956</td><td>0.895</td><td>0.878</td><td>0.856</td><td>0.761</td><td>0.592</td><td>0.426</td><td>0.255</td></tr><tr><td colspan="9">n = 2000</td></tr><tr><td>VIP+SR</td><td>0.990</td><td>0.935</td><td>0.914</td><td>0.887</td><td>0.805</td><td>0.619</td><td>0.448</td><td>0.277</td></tr><tr><td>GSE+SR</td><td>0.963</td><td>0.918</td><td>0.905</td><td>0.872</td><td>0.787</td><td>0.617</td><td>0.445</td><td>0.272</td></tr><tr><td>SR</td><td>0.960</td><td>0.907</td><td>0.892</td><td>0.855</td><td>0.781</td><td>0.611</td><td>0.437</td><td>0.272</td></tr></table>
|
| 461 |
+
|
| 462 |
+
# D.4. Effect of Noisy, Duplicated, and Correlated Predictors
|
| 463 |
+
|
| 464 |
+
In addition to the extensive simulation settings described in Section 5.2, we further evaluate VIP Rank under alternative predictor structures that challenge common modeling assumptions:
|
| 465 |
+
|
| 466 |
+
- Baseline: $x_{1},\ldots,x_{p}\stackrel{\mathrm{iid}}{\sim}\mathrm{Unif}(0,1)$
|
| 467 |
+
- Noisy $X$ : Independent Gaussian noise is added to each predictor with variance equal to 1/5 of the signal variance
|
| 468 |
+
- Duplicated $X$ : A redundant feature is added: $x_{6} = x_{1} + x_{2}$ , where $x_{1}$ and $x_{2}$ are relevant predictors
|
| 469 |
+
- Correlated $X$ : $x_1,\ldots,x_p\sim \mathrm{Unif}(0,1)$ with an autocorrelation structure: $\rho_{ij} = 0.9^{|i - j|}$
|
| 470 |
+
|
| 471 |
+
The response variable $y$ is generated according to the Friedman (1991) equation:
|
| 472 |
+
|
| 473 |
+
$$
|
| 474 |
+
y = 10 \sin(\pi x_{1} x_{2}) + 20 (x_{3} - 0.5)^{2} + 10 x_{4} + 5 x_{5} + \varepsilon, \quad \varepsilon \sim N(0, \sigma^{2}).
|
| 475 |
+
$$
|
| 476 |
+
|
| 477 |
+
We fix $n = 1000$ , $p = 100$ , $\mathrm{SNR} = 10$ , and repeat each scenario for 100 trials. Table 4 reports the average TPR and FPR. VIP Rank consistently identifies all relevant features across all scenarios, demonstrating strong robustness to noise, redundancy, and correlation among predictors.
|
| 478 |
+
|
| 479 |
+
Table 4: Average performance in each scenario across 100 trials.
|
| 480 |
+
|
| 481 |
+
<table><tr><td>Scenario</td><td>TPR</td><td>FPR</td></tr><tr><td>Baseline</td><td>100%</td><td>10.58%</td></tr><tr><td>Noisy X</td><td>100%</td><td>26.42%</td></tr><tr><td>Duplicated X</td><td>100%</td><td>11.11%</td></tr><tr><td>Correlated X</td><td>100%</td><td>15.98%</td></tr></table>
|
| 482 |
+
|
| 483 |
+
# D.5. Additional Performance Metrics for Operon vs PAN+Operon
|
| 484 |
+
|
| 485 |
+
Figures 11, 12, 13 show additional metrics not discussed in the main paper. Although PAN+Operon's solution rate plummeted from $\sim 27\%$ at $\mathrm{SNR} = \infty$ to $0\%$ at $\mathrm{SNR} = 20$ across all $n$ , Figure 11 shows there is still improvement in $R^2$ on test set across all $n$ and SNR, while improving model interpretability evidenced by the uniformly lower model size in Figure 12.
|
| 486 |
+
|
| 487 |
+

|
| 488 |
+
Figure 11: $R^2$ on test set with Operon as the SR module. Points indicate the average $R^2$ on test set and bars represent the $95\%$ confidence intervals.
|
| 489 |
+
|
| 490 |
+

|
| 491 |
+
Figure 12: Model size with Operon as the SR module. Points indicate the average model size and bars represent the $95\%$ confidence intervals.
|
| 492 |
+
|
| 493 |
+

|
| 494 |
+
Figure 13: Solution rate with Operon as the SR module. Points indicate the average solution rate and bars represent the $95\%$ confidence intervals.
|
abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:00b10f0b9a45765371a80a0f45f2cbc22c827d65d17d61125641269bee3ae9fa
|
| 3 |
+
size 993963
|
abinitiononparametricvariableselectionforscalablesymbolicregressionwithlargep/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:51ede053ce49afcdf6118c21d249d780a8f1ae06f39522a1b379b5471ae0c2cf
|
| 3 |
+
size 800765
|
abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/89ad1898-ec6d-430c-a0d7-cdc1fb5659fa_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:33b207d7474c36e2244edb7e149418213d49043db35e2658a7eb824430b64c1a
|
| 3 |
+
size 350143
|
abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/89ad1898-ec6d-430c-a0d7-cdc1fb5659fa_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c593f38714f8bc47b2771b1d1ab9a7bfa2849cd69d9e8915649232c13817f72b
|
| 3 |
+
size 424430
|
abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/89ad1898-ec6d-430c-a0d7-cdc1fb5659fa_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:83225f30b01acae912510db48b518ab047b09e6d3b7325aefbbf76a8031f7273
|
| 3 |
+
size 4525036
|
abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:98f5804d7214939d3df0aea427f9f5d5638af1d59e486482c1349da72b74536e
|
| 3 |
+
size 3279171
|
abkdpursuingaproperallocationoftheprobabilitymassinknowledgedistillationviadivergence/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d4e94951a55f72c1a0eec75caed4084d39d760b0fbeb7c1df2440b4b4021ddd6
|
| 3 |
+
size 2371685
|
abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/474f3ca7-b597-40ce-a816-f5a449c555ce_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5c8a2506a248605cca7f55088c1f937f440cb40cdc1fc05ed9b4e08d2d2b8a27
|
| 3 |
+
size 139846
|
abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/474f3ca7-b597-40ce-a816-f5a449c555ce_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:906bb486389d63fdbd6ad1414d9dbdf6539f235614c62468cbec66a342d3a5b9
|
| 3 |
+
size 164121
|
abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/474f3ca7-b597-40ce-a816-f5a449c555ce_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c7eddcb1426ffe42653316d7e16b9a0ac182f1fbdfee573ba1a25cbf119d1134
|
| 3 |
+
size 1518672
|
abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/full.md
ADDED
|
@@ -0,0 +1,615 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ABNet: Adaptive explicit-Barrier Net for Safe and Scalable Robot Learning
|
| 2 |
+
|
| 3 |
+
Wei Xiao $^{1}$ Tsun-Hsuan Wang $^{1}$ Chuang Gan $^{2}$ Daniela Rus $^{1}$
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Safe learning is central to AI-enabled robots where a single failure may lead to catastrophic results. Existing safe learning methods are not scalable, inefficient and hard to train, and tend to generate unstable signals under noisy inputs that are challenging to be deployed for robots. To address these challenges, we propose Adaptive explicit-Barrier Net (ABNet) in which barriers explicitly show up in the closed-form model that guarantees safety. The ABNet has the potential to incrementally scale toward larger safe foundation models. Each head of ABNet could learn safe control policies from different features and focuses on specific part of the observation. In this way, we do not need to directly construct a large model for complex tasks, which significantly facilitates the training of the model while ensuring its stable output. Most importantly, we can still formally prove the safety guarantees of the ABNet. We demonstrate the efficiency and strength of ABNet in 2D robot obstacle avoidance, safe robot manipulation, and vision-based end-to-end autonomous driving, with results showing much better robustness and guarantees over existing models<sup>1</sup>.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Robot learning usually requires leveraging scalable training and vast amounts of data. There are many large models (Li et al., 2022) for complex robotic tasks including manipulation, locomotion, autonomous driving (Bommasani et al., 2021) (Singh et al., 2023) (Wang et al., 2023a). However, these models are not trustworthy and have no safety guarantees. Existing methods that incorporate guarantees or certificates into neural networks are not scalable and hard to
|
| 12 |
+
|
| 13 |
+
<sup>1</sup>Computer Science and Artificial Intelligence Lab, MIT, USA
|
| 14 |
+
<sup>2</sup>UMass Amherst and MIT-IBM Watson AI Lab, USA. Correspondence to: Wei Xiao <weixy@mit.edu>.
|
| 15 |
+
|
| 16 |
+
Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
|
| 17 |
+
|
| 18 |
+
1Code is available at: https://github.com/Weixy21/ ABNet
|
| 19 |
+
|
| 20 |
+
train (Pereira et al., 2020) (Xiao et al., 2023) (Wang et al., 2023b). It is desirable to merge these models as we can get better-performing controllers in general (Beygelzimer et al., 2015) (Agarwal et al., 2020). Traditional mixture of expert methods (Shazeer et al., 2017) (Riquelme et al., 2021) (Zhou et al., 2022) or other merging approaches (Huang et al., 2023) (Ramé et al., 2023) (Wang et al., 2024) are not designed to retain the safety of the models. In this work, we explore leveraging the collective power of many safety-critical models to handle complex tasks while preserving the safety of the models.
|
| 21 |
+
|
| 22 |
+
There are various definitions of safety for robotics and autonomy, and safety can be basically defined as something bad never happens. Mathematically, safety can be defined as a continuously differentiable constraint with respect to the system state and it can be further captured by the forward invariance of the safe set over such a constraint (Ames et al., 2017) (Xiao & Belta, 2021) (Glotfelter et al., 2017). In other words, we can use different constraints and approaches to enforce safety. The way we learn such safety enforcement methods may depend on the focused observation feature. For instance, some human drivers may focus on the left lane boundary in driving in order to achieve safe lane keeping, while others may focus on the right lane boundary, as shown in Fig. 1. Merging these models enables us to build robust and powerful learning models. However, the adaptivity of the merging method to different safe models is crucial, especially in retaining safety.
|
| 23 |
+
|
| 24 |
+
In the literature, differentiable Quadratic Programs (dQP) (Amos & Kolter, 2017) and differentiable Model Predictive Control (dMPC) (Amos et al., 2018) are widely used for safe robot learning. However, dMPC is restricted to linear systems with linear constraints. Barrier-based learning methods (Robey et al., 2020) (Pereira et al., 2020) (Srinivasan et al., 2020), such as the BarrierNet (Xiao et al., 2023) (Wang et al., 2023b) (Liu et al., 2023), are widely used to transform nonlinear problems into dQPs and can equip deep learning systems with safety guarantees. However, there are several limitations of these learning methods: $(i)$ they involve solving batch QPs during training, which is inefficient, and dQPs tend to give awful solutions that significantly deteriorate the model; $(ii)$ they can only implement a single safety enforcement method as the last layer of the neural network, which is not scalable to larger safe learning
|
| 25 |
+
|
| 26 |
+
An Efficient Safety-Guaranteed Learning System with Scalability
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
Figure 1: The proposed ABNet that is efficient, scalable and generates stable output while guaranteeing safety for robots. Each head of ABNet in the model could learn safe control policies with focus on different observation features in a scalable or one-shot/direct manner. Barriers play the role of gates in determining the closed-form safe control and are more interpretable.
|
| 30 |
+
|
| 31 |
+
models; (iii) these methods tend to generate unstable output under noise, which cannot be deployed for robots.
|
| 32 |
+
|
| 33 |
+
In this paper, we propose the Adaptive explicit-Barrier Net (ABNet) to merge many safety-critical models while preserving the safety guarantees. The ABNet is efficient, scalable, robust to noise, and easy to be trained in an incremental manner. As shown in Fig. 1, we may build multi-head models within the ABNet. Each head of the ABNet may pay attention to different observation features to generate a safe control policy. We combine the outputs of all the safe learning models in a way that is provably safe. The weights of this combination quantify the importance of each head of the model, and they are trainable. The structure of the ABNet allows us to build larger safe foundation models for complicated robotic applications as we can incrementally train safe models corresponding to different robot skills and this will simply increase the number of heads $h$ of the ABNet.
|
| 34 |
+
|
| 35 |
+
In summary, we make the following new contributions:
|
| 36 |
+
|
| 37 |
+
- We propose a novel explicit-Barrier model that shows superior training stability and computational performance over dQP (Amos & Kolter, 2017) and BarrierNet (Xiao et al., 2023) while guaranteeing safety of robot learning, and the explicit-Barrier model is crucial to build larger models via merging due to its high efficiency in dealing with nonlinear systems and constraints.
|
| 38 |
+
- We propose a novel ABNet that merges many safety-critical learning models, and this new model is scalable, robust, and easy to be trained.
|
| 39 |
+
- We formally prove the safety guarantees of the proposed ABNet.
|
| 40 |
+
- We demonstrate the strength and effectiveness of our model on a variety of robot control tasks, including 2D robot obstacle avoidance, safe robot manipulation, and vision-based end-to-end autonomous driving in an open dataset. We also show that existing models/policies merging could make safety worse in complicated tasks (such as
|
| 41 |
+
|
| 42 |
+
in vision-based driving).
|
| 43 |
+
|
| 44 |
+
# 2. Problem Formulation
|
| 45 |
+
|
| 46 |
+
We consider the following safe robot learning problem:
|
| 47 |
+
|
| 48 |
+
Problem. Given (a) a robotic system with dynamics; (b) a state-feedback nominal controller $\pi^{*}(\pmb{x}) = \pmb{u}^{*}$ (such as a model predictive controller) that provides the training label; (c) a set of safety constraints $b_{j}(\pmb{x})\geq 0,j\in S(b_{j}$ is continuously differentiable, $S$ is a constraint set); (d) a neural network controller $\pi (\pmb {x},\pmb {z}|\theta) = \pmb{u}$ parameterized by $\theta$ (under observation $\pmb{z}$ );
|
| 49 |
+
|
| 50 |
+
Our goal is to find the optimal parameter
|
| 51 |
+
|
| 52 |
+
$$
|
| 53 |
+
\theta^ {*} = \arg \min _ {\theta} \mathbb {E} _ {\boldsymbol {x}, \boldsymbol {z}} [ \ell (\pi^ {*} (\boldsymbol {x}), \pi (\boldsymbol {x}, \boldsymbol {z} | \theta)) ], \tag {1}
|
| 54 |
+
$$
|
| 55 |
+
|
| 56 |
+
while satisfying all the safety constraints in (c) and the dynamics constraint (a). $\mathbb{E}$ is the expectation, and $\ell$ is a loss function.
|
| 57 |
+
|
| 58 |
+
# 3. Adaptive Explicit-Barrier Net
|
| 59 |
+
|
| 60 |
+
In this section, we present the architecture of the Adaptive explicit-Barrier Net (ABNet) and formally prove its safety guarantees in learning systems.
|
| 61 |
+
|
| 62 |
+
Our proposed method can fuse machine learning models that can strictly enforce system safety. In the literature, to make the safety model trainable without losing guarantees, we would usually require the model to be in the form of differentiable convex optimizations, such as differentiable QP (Amos & Kolter, 2017), differentiable MPC (Amos et al., 2018) or differentiable CBF (Xiao et al., 2023). In the former two cases, the considered robot learning problems are usually with linear dynamics and linear constraints. Otherwise, the optimization becomes nonlinear (i.e., not trainable in neural networks). Although one can transform constrained optimizations into unconstrained optimizations that are trainable using classical barrier functions (Boyd &
|
| 63 |
+
|
| 64 |
+
Vandenberghe, 2004), it may make the system lose safety guarantees.
|
| 65 |
+
|
| 66 |
+
# 3.1. Multi-head Explicit-Barrier
|
| 67 |
+
|
| 68 |
+
We focus on general safe robot learning problems with nonlinear dynamics and constraints. For such problems, it has been shown that we can use the CBF transformation to reduce nonlinear optimizations onto quadratic optimizations with safety guarantees (Ames et al., 2017) (Xiao & Belta, 2021), which gives rise to the so-called BarrierNet (Xiao et al., 2023).
|
| 69 |
+
|
| 70 |
+
Specifically, consider robot dynamics as: $\dot{\pmb{x}} = f(\pmb{x}) + g(\pmb{x})\pmb{u}$ , where $\pmb{x} \in \mathbb{R}^n$ is the robot state, $f: \mathbb{R}^n \to \mathbb{R}^n$ and $g: \mathbb{R}^n \to \mathbb{R}^{n \times q}$ are locally Lipschitz, and $\pmb{u} \in \mathbb{R}^q$ is the control. We can also consider non-affine control systems by defining auxiliary systems (Xiao et al., 2021).
|
| 71 |
+
|
| 72 |
+
Implicit-Barrier. The constrained optimal control in the considered problem in Sec. 2 is then transformed into the following differentiable CBF/BarrierNet (Xiao et al., 2023), which may form a head of the model:
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
\boldsymbol {u} _ {k} = \arg \min _ {\boldsymbol {u} (t)} \frac {1}{2} \boldsymbol {u} (t) ^ {T} H \left(\boldsymbol {z} _ {k} \mid \theta_ {h, k}\right) \boldsymbol {u} (t) + F ^ {T} \left(\boldsymbol {z} _ {k} \mid \theta_ {f, k}\right) \boldsymbol {u} (t) \tag {2}
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
s.t.
|
| 79 |
+
|
| 80 |
+
$$
|
| 81 |
+
\begin{array}{l} L _ {f} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + \left[ L _ {g} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \right] \boldsymbol {u} \\ + p _ {m, k} \left(\boldsymbol {z} _ {k} \mid \theta_ {p, k} ^ {m}\right) \alpha_ {j, m} \left(\psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} \mid \theta_ {p})\right) \geq 0, j \in S, \\ \psi_ {j, i} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) = \dot {\psi} _ {j, i - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \\ + p _ {i} (\boldsymbol {z} | \theta_ {p} ^ {i}) \alpha_ {j, i} \left(\psi_ {j, i - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p})\right), i \in \{1, \dots , m - 1 \}, j \in S, \\ \psi_ {j, 0} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) = b _ {j} (\boldsymbol {x}), j \in S, \tag {3} \\ \end{array}
|
| 82 |
+
$$
|
| 83 |
+
|
| 84 |
+
where $H(\pmb{z}_k|\theta_{h,k}) \in \mathbb{R}^{q \times q}$ is positive definite, and $-H^{-1}(\pmb{z}_k|\theta_{h,k})F(\pmb{z}_k|\theta_{f,k})$ can be interpreted as a reference control (the output of previous network layers). The constraints above are the High-Order CBFs (HOCBFs) constructed in enforcing the safety constraints $b_j(\pmb{x}) \geq 0$ of relative degree $m$ (Xiao & Belta, 2021). It can be shown that the satisfaction of the first HOCBF constraint in the above implies the satisfaction of $b_j(\pmb{x}) \geq 0, \forall j \in S$ , which proves the safety of differentiable CBF. In the above, $L_f\psi = \frac{d\psi}{dx} f(\pmb{x}), L_g\psi = \frac{d\psi}{dx} g(\pmb{x}), k \in \{1, \dots, h\}$ , and $h$ is the number of heads (as shown in Fig. 1). $p_i \geq 0, i \in \{1, \dots, m-1\}, p_{m,k} \geq 0$ are penalty functions (outputs of the previous network, as shown in Fig. 2) on the strictly increasing and zero-passing functions $\alpha_{j,i}, i \in \{1, \dots, m\}, j \in S$ , and will determine the conservativeness of the robot. $\theta := (\theta_h, k, \theta_{f,k}, \theta_{p,k}^m, \theta_p), k \in \{1, \dots, h\}$ , where $\theta_p := (\theta_p^1, \dots, \theta_p^{m-1})$ are all trainable parameters. $z_k$ is the observation of the head $k$ , and it is possible that all heads share the same observation, i.e. $z_k = z, \forall k \in \{1, \dots, h\}$ .
|
| 85 |
+
|
| 86 |
+
The training of the above differentiable CBF (3) involves
|
| 87 |
+
|
| 88 |
+

|
| 89 |
+
Figure 2: Architecture of multi-head explicit-barriers. ABNet is capable (adaptive) to fuse any safe learning models, such as the proposed explicit-barriers, BarrierNet, dMPC, etc. The ABNet is usually used in conjunction with any other neural networks and can be implemented in parallel. The parameters (inputs) of each head of ABNet are the outputs of previous layers (such as CNN or LSTM).
|
| 90 |
+
|
| 91 |
+
solving batch QPs (Amos & Kolter, 2017), which is inefficient. Since CBFs do not explicitly show up in the solution, we call the differentiable CBF (3) an implicit-barrier. In the following, we derive the trainable explicit solution of the differentiable CBF, which is our proposed explicit-barrier.
|
| 92 |
+
|
| 93 |
+
Explicit-Barrier. It has been shown in (Luenberger, 1997) that we can find the explicit solution of a QP if there are only two constraints. As the cardinality of the safety constraint set $S$ may be greater than two, the number of HOCBFs in the differentiable CBF (3) will also be greater than two. In order to address this, first, we can define two safety functions $b_{I}(\pmb{x}) = -\ln(\sum_{j \in S_{1}} \exp(-b_{j}(\pmb{x})))$ and $b_{II}(\pmb{x}) = -\ln(\sum_{j \in S_{2}} \exp(-b_{j}(\pmb{x})))$ , where $S = S_{1} \cup S_{2}$ . By (Boyd & Vandenberghe, 2004), we have that $\max_{j \in S} b_{j}(\pmb{x}) \leq \ln(\sum_{j \in S} \exp(b_{j}(\pmb{x})))$ . It can be easily shown that $b_{I}(\pmb{x}) \geq 0$ and $b_{II}(\pmb{x}) \geq 0$ implies $b_{j}(\pmb{x}) \geq 0, \forall j \in S$ .
|
| 94 |
+
|
| 95 |
+
Alternatively, we can simply consider the two riskiest safety specifications, i.e., $b_{I}(\pmb{x}) = \min_{j\in S}b_{j}(\pmb{x}), b_{II}(\pmb{x}) = \min_{j\in S\setminus \arg \min_{j\in S}b_{j}(\pmb{x})}b_{j}(\pmb{x})$ . This approach is simpler and works well for most obstacle avoidance tasks.
|
| 96 |
+
|
| 97 |
+
Then, the explicit optimal solution of the differentiable CBF with the above two safety specifications can be given by
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
\begin{array}{l} \boldsymbol {u} _ {k} = - \lambda_ {1} (\boldsymbol {x}) H ^ {- 1} L _ {g} \psi_ {I, m - 1} (\boldsymbol {x}) \\ - \lambda_ {2} (\boldsymbol {x}) H ^ {- 1} L _ {g} \psi_ {I I, m - 1} (\boldsymbol {x}) - H ^ {- 1} F, \tag {4} \\ \end{array}
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
where $H, F$ are given as in (2) with arguments omitted. $\lambda_1(\pmb{x}) \leq 0, \lambda_2(\pmb{x}) \leq 0$ are two gate functions ((14), (15) given in Appendix), and $\psi_{I,m-1}(\pmb{x}), \psi_{II,m-1}(\pmb{x})$ are the two HOCBFs corresponding to $b_I(\pmb{x}), b_{II}(\pmb{x})$ defined similarly as in (3). As we can see that $b_I(\pmb{x}), b_{II}(\pmb{x})$ explicitly show up in the above equation while it guarantees safety, we call it explicit-Barrier. This makes the explicit-Barrier more interpretable. The whole explicit solution derivation process is given in Appendix Sec. A.
|
| 104 |
+
|
| 105 |
+
Adaptive mechanism. Each head of ABNet may learn different safe control policies even if all the heads have the same observation $z$ . The benefit is that the final performance is achieved "collectively" by all heads and thus each head can just focus on the "subproblem" with safety. Alternatively, we may also make each head of ABNet focus on different observations $z_{k}$ . The observation $z_{k}$ may come from different parts of the sensor observation (such as the left lane boundary and right lane boundary in driving shown in Fig. 1), or even different perceptions (such as vision, lidar, etc.)
|
| 106 |
+
|
| 107 |
+
Cross connection. It can be noted from (3) that each head of ABNet $k \in \{1, \dots, h\}$ has some cross connection with other heads, as also shown in Fig. 1. In other words, $\psi_{j,i}(\boldsymbol{x}, \boldsymbol{z} | \theta_p), i \in \{1, \dots, m-1\}, j \in S$ are formulated in the same way through the shared parameter $\theta_p$ (independent from $k$ ). This is to ensure (i) the construction for provable safety (as shown later), and (ii) some shared information across different heads of ABNet as they all generate safe controls for the robot.
|
| 108 |
+
|
| 109 |
+
Fusion. Another important consideration is how should we fuse all these controls $\pmb{u}_k, k \in \{1, \dots, h\}$ while preserving the safety property of each head of the ABNet. We propose the following form:
|
| 110 |
+
|
| 111 |
+
$$
|
| 112 |
+
\boldsymbol {u} = \sum_ {k = 1} ^ {h} w _ {k} \boldsymbol {u} _ {k}, \quad \text {where } \sum_ {k = 1} ^ {h} w _ {k} = 1. \tag {5}
|
| 113 |
+
$$
|
| 114 |
+
|
| 115 |
+
In the above, $w_{k} \geq 0, k \in \{1, \dots, h\}$ are trainable parameters. The composition of explicit-Barrier (4), BarrierNet, and dMPCs, etc., in the form of (5) is our proposed ABNet, as shown in Fig. 2. The safety guarantees of the ABNet are shown in the following theorem:
|
| 116 |
+
|
| 117 |
+
Theorem 3.1. (Safety of ABNets) Given the multi-head ABNet formulated as in (4) and all other safe learning models (BarrierNet, dMPC, etc.). If the system is initially safe (i.e., $b_{j}(\boldsymbol{x}(t_{0})) \geq 0, \forall j \in S$ ), then a control policy $\boldsymbol{u}$ from the ABNet output (5) guarantees the safety of the system, i.e., $b_{j}(\boldsymbol{x}(t)) \geq 0, \forall j \in S, \forall t \geq t_{0}$ .
|
| 118 |
+
|
| 119 |
+
All the proofs for theorems are given in Appendix B. If the system is not initially safe (i.e., $b_{j}(\boldsymbol{x}(t_{0})) < 0, \exists j \in S$ ), then the system state $\boldsymbol{x}$ will be driven to the safe side of the state space due to the Lyapunov property of CBF/HOCBFs (Ames et al., 2017) (Xiao & Belta, 2021). This enables the possibility of utilizing data that violates safety to conduct adversarial training of the ABNet.
|
| 120 |
+
|
| 121 |
+
Natural noise filter. The ABNet is a natural noise filter since $w_{k} \in [0,1], \forall k \in \{1, \dots, h\}$ in (5). This can ensure that the output $\pmb{u}$ of the model is stable with a large enough head number $h$ if all the heads have different observation $z_{k}$ for the current environment. This feature makes ABNet a very robust and adaptive controller for robotic systems, and thus,
|
| 122 |
+
|
| 123 |
+
# Algorithm 1 Construction and training of ABNet
|
| 124 |
+
|
| 125 |
+
Input: the problem setup (a)-(d) given in the problem formulation (Sec. 2).
|
| 126 |
+
|
| 127 |
+
Output: a robust and safe controller $\pmb{u}$ for the system.
|
| 128 |
+
|
| 129 |
+
(a) Formulate each head of explicit-Barriers as in (4).
|
| 130 |
+
(b) Build the cross connection among explicit-Barriers via $p_i(\mathbf{z}|\theta_p^i), i \in \{1, \dots, m-1\}$ .
|
| 131 |
+
(c) Fuse all the heads of explicit-Barriers as in (5).
|
| 132 |
+
|
| 133 |
+
# if Incremental training then
|
| 134 |
+
|
| 135 |
+
Decouple $p_i(z|\theta_p^i), i \in \{1, \dots, m - 1\}$ and define them for each explicit-Barrier.
|
| 136 |
+
|
| 137 |
+
Train each head of explicit-Barriers, respectively.
|
| 138 |
+
|
| 139 |
+
Choose a $p_i(\boldsymbol {z}|\theta_p^i),i\in \{1,\dots ,m - 1\}$ from one of the explicit-Barriers to build cross connection.
|
| 140 |
+
|
| 141 |
+
Fuse all the explicit-Barriers via (6).
|
| 142 |
+
|
| 143 |
+
# else
|
| 144 |
+
|
| 145 |
+
Directly train the ABNet via reverse mode error back propagation.
|
| 146 |
+
|
| 147 |
+
# end if
|
| 148 |
+
|
| 149 |
+
ABNet can generate smooth signals.
|
| 150 |
+
|
| 151 |
+
Theorem 3.2. (Safety of merging of ABNets) Given two ABNets, the merged model using the form as in (5) again guarantees the safety of the system.
|
| 152 |
+
|
| 153 |
+
# 3.2. Model Training
|
| 154 |
+
|
| 155 |
+
The ABNet can be trained incrementally or in one-shot. This is due to the fact that each head of ABNet can generate a control policy that is applicable to the system. The linear combination weights $w_{k}, k \in \{1, \dots, h\}$ in the ABNet denote the importance of the corresponding control policies.
|
| 156 |
+
|
| 157 |
+
Incremental training. In ABNet, we may train each head $k,k\in \{1,\dots ,h\}$ of the model in a scalable way as we wish to minimize the loss between their output $\pmb{u}_k$ and the label $\pmb{u}^*$ as well. The training can be done by directly incorporating the explicit-Barrier (4) into the model. There are some cross connections via $p_i(z|\theta_p)$ between explicit-Barriers in the ABNet that may prevent the implementation of the training. We may address this by training a $p_i(z|\theta_p)$ for each head of the ABNet. After we train all heads of the model, we may fix the parameters of those models, choose a $p_i(z|\theta_p)$ from one of the explicit-Barriers (or take an average of all $p_i(z|\theta_p)$ among the models) to build the cross connection, and train the weights $w_{k}$ for some more iterations. Another way is to fuse these explicit-Barriers based on the testing loss. In other words, the weight $w_{k},k\in \{1,\ldots ,h\}$ can be determined by:
|
| 158 |
+
|
| 159 |
+
$$
|
| 160 |
+
w _ {k} = \frac {1 / \ell_ {k} \left(\boldsymbol {u} _ {k} , \boldsymbol {u} ^ {*}\right)}{\sum_ {k = 1} ^ {h} 1 / \ell_ {k} \left(\boldsymbol {u} _ {k} , \boldsymbol {u} ^ {*}\right)}, \tag {6}
|
| 161 |
+
$$
|
| 162 |
+
|
| 163 |
+
where $\ell_{k}$ is a loss function.
|
| 164 |
+
|
| 165 |
+
If we already have some trained ABNet, and we wish to
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
Figure 3: Computation (upper, numbers in the bracket denote variance) and training efficiency (lower, numbers in the bracket denote testing loss) comparison of our proposed explicit-Barrier (ABNet) with dQP and BarrierNet (BNet). The use of dQP in BarrierNet could give very bad solutions. NN is a normal neural network without safety guarantees.
|
| 171 |
+
|
| 172 |
+
add some new capabilities (such as safe driving by only focusing on the left lane boundary) to the model, then we can train some heads of ABNets based on the new data we have. Finally, we can fuse the models similarly with safety guarantees as shown in Thm. 3.2. This shows the scalability of the proposed ABNet that allows us to build larger foundational safe models in an incremental way.
|
| 173 |
+
|
| 174 |
+
One-shot/Direct training. The one-shot training of the ABNet can be directly done using the traditional reverse mode automatic differentiation. In addition to the loss between the eventual output $\pmb{u}$ of the ABNet and the label $\pmb{u}^{*}$ , we may also consider the losses on $\pmb{u}_k, k\in \{1,\dots,h\}$ , as well as on the reference controls $-H^{-1}(z_k|\theta_{h,k})F(z_k|\theta_{f,k})$ , in order to improve the training performance.
|
| 175 |
+
|
| 176 |
+
The construction and training of the ABNet involve the formulation of each head of explicit-Barriers as in (4), the model fusion as in (5), and the scalable or direct training as shown above (Alg. 1).
|
| 177 |
+
|
| 178 |
+
# 4. Experiments
|
| 179 |
+
|
| 180 |
+
In this section, we conduct several experiments to answer the following questions:
|
| 181 |
+
|
| 182 |
+

|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
Figure 4: 2D robot obstacle avoidance closed-loop testing control profiles (upper) and ABNet performance with the increasing of ABNet heads using scalable training (lower). This scalable training for ABNet is with safety guarantees. The controls are subject to input noise, and thus are nonsmooth.
|
| 186 |
+
|
| 187 |
+
- How does the proposed explicit-Barrier compare with dQP (Amos & Kolter, 2017) and BarrierNet (Xiao & Belta, 2021) in terms of computation and training efficiency?
|
| 188 |
+
- Does our method match the theoretic results in experiments and is it scalable?
|
| 189 |
+
- How does our method compare with state-of-the-art models in enforcing safety constraints?
|
| 190 |
+
- The benefit of models/policies merging and the robustness of our models in safety and smoothness?
|
| 191 |
+
|
| 192 |
+
Benchmark models: We compare with (i) baseline: Tables 1, 2–single end-to-end learning model (E2E) (Levine et al., 2016) and Table 3–single vanilla end-to-end (V-E2E) model (Amini et al., 2022), (ii) safety guaranteed models: (implicit-) BarrierNet (BNet) (Xiao et al., 2023), Deep forward and backward (DFB) model (Pereira et al., 2020), (iii) policies merging: BarrierNet policies merged with uncertainty propagation (BNet-UP) (Wang et al., 2023b) that employs Gaussian kernels with Scott's rule (Scott, 2015) to select the bandwidth, (iv) models merging: E2Es merged with Monte-Carlo Dropout (E2Es-MCD) (Gal & Ghahramani, 2016), E2Es merged with Deep Ensembles (E2Es-DR) (Lakshminarayanan et al., 2017).
|
| 193 |
+
|
| 194 |
+
Table 1: 2D robot obstacle avoidance closed-loop testing under noisy input.
|
| 195 |
+
|
| 196 |
+
<table><tr><td>MODEL</td><td>SAFETY(≥0)</td><td>CONSER.(≥0&↓)</td><td>MSE(↓)</td><td>u1UNCERTAINTY(↓)</td><td>u2UNCERTAINTY(↓)</td><td>THEORET.GUAR.</td></tr><tr><td>E2E (LEVINE ET AL., 2016)</td><td>-14.140</td><td>-2.976±3.770</td><td>0.007±0.004</td><td>0.063</td><td>0.049</td><td>×</td></tr><tr><td>E2ES-MCD (GAL &GHAHRAMANI, 2016)</td><td>-2.087</td><td>-1.341±0.824</td><td>0.004±0.001</td><td>0.041</td><td>0.026</td><td>×</td></tr><tr><td>E2ES-DR (LAKSHMINARAYANANEt AL., 2017)</td><td>-35.130</td><td>-3.176±4.299</td><td>0.080±0.006</td><td>0.032</td><td>0.020</td><td>×</td></tr><tr><td>DFB (PEREIRA ET AL., 2020)</td><td>36.659</td><td>47.810±4.377</td><td>0.013±0.003</td><td>0.062</td><td>0.052</td><td>√</td></tr><tr><td>BNET (XIAO ET AL., 2023)</td><td>5.045</td><td>7.966±1.287</td><td>0.014±0.006</td><td>0.074</td><td>0.047</td><td>√</td></tr><tr><td>BNET-UP (WANG ET AL., 2023B)</td><td>5.988</td><td>8.573±1.738</td><td>0.008±0.004</td><td>0.054</td><td>0.028</td><td>×</td></tr><tr><td>ABNET-10-SC (OURS)</td><td>5.731</td><td>6.269±0.319</td><td>0.011±0.007</td><td>0.065</td><td>0.027</td><td>√</td></tr><tr><td>ABNET-10 (OURS)</td><td>12.639</td><td>13.887±1.323</td><td>0.008±0.005</td><td>0.049</td><td>0.030</td><td>√</td></tr><tr><td>ABNET-100 (OURS)</td><td>10.122</td><td>11.729±0.816</td><td>0.012±0.006</td><td>0.049</td><td>0.013</td><td>√</td></tr></table>
|
| 197 |
+
|
| 198 |
+
Our models: We consider the minimum function method in determining $b_{I}(\pmb{x})$ and $b_{II}(\pmb{x})$ . Sec. 4.2 and 4.3: ABNet trained in a scalable way with 10 heads (ABNET-10-SC), ABNet trained in one shot with 10 heads (ABNET-10), ABNet trained in one shot with 100 heads (ABNET-100). Sec. 4.4: our ABNet trained in one shot with 10 heads using the same input images (ABNET), ABNet with attention images and 10 heads (ABNET-ATT), our ABNet first trained with ABNET scaled by ABNET-ATT (20 heads, ABNET-SC).
|
| 199 |
+
|
| 200 |
+
Evaluation metrics: The evaluation metrics are defined as follows: mean square error of the model testing (MSE), satisfaction of safety constraints where non-negative values mean safety guarantees (SAFETY), system conservativeness (CONSER.), steering control $u_{1}$ uncertainty ( $u_{1}$ UNCERTAINTY), acceleration control $u_{2}$ uncertainty ( $u_{2}$ UNCERTAINTY), and theoretical safety guarantees (THEORET. GUAR.) respectively. The metrics are explicitly defined in Appendix C.
|
| 201 |
+
|
| 202 |
+
# 4.1. Computation and Training Time
|
| 203 |
+
|
| 204 |
+
We first compare the training stability and efficiency of our proposed explicit-Barrier (or ABNet) with dQP (Amos & Kolter, 2017) and BarrierNet (Xiao et al., 2023). The dQP method is based on the "QPFunction" library from OptNet (Amos & Kolter, 2017), and BarrierNet is based on dQP. The computation times under different batch sizes are shown in Fig. 3 (upper). The computation time significantly increases as the batch size increases, but the proposed explicit-Barrier remains efficient. Fig. 3 (lower) shows the training time (of the model based on the 2D robot case in Sec. 4.2) under different numbers of heads. The training time of our proposed ABNet is comparable to that of a normal NN (NN is without safety guarantees). In contrast, the BarrierNet (based on dQP) tends to give very bad training and testing solutions, as also shown in Fig. 7 in the Appendix, which significantly deteriorates the quality of model training.
|
| 205 |
+
|
| 206 |
+
# 4.2. 2D Robot Obstacle Avoidance
|
| 207 |
+
|
| 208 |
+
We aim to find a neural network controller for a 2D robot that can drive the robot from an initial location to an arbitrary destination while avoiding crashing into the obstacle. All the models (h copies/heads) have the same input (with uniformly distributed noise, $10\%$ of the input magnitude in testing). The detailed problem setup and model introductions are given in Appendix C.2.
|
| 209 |
+
|
| 210 |
+

|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
Figure 5: Robot manipulation closed-loop end-effector trajectories (upper) and ABNet performance with the increasing of model heads using scalable training (lower). The transparent trajectories in the upper figure are corresponding to results in all runs.
|
| 214 |
+
|
| 215 |
+
Models/policies merging can improve the performance as
|
| 216 |
+
|
| 217 |
+
Table 2: Robot manipulation closed-loop testing under noisy input and comparisons with benchmarks.
|
| 218 |
+
|
| 219 |
+
<table><tr><td>MODEL</td><td>SAFETY(≥0)</td><td>CONSER.(≥0&↓)</td><td>MSE(↓)</td><td>u1UNCERTAINTY(↓)</td><td>u2UNCERTAINTY(↓)</td><td>THEORET.GUAR.</td></tr><tr><td>E2E (LEVINE ET AL., 2016)</td><td>-11.027</td><td>-1.082±2.992</td><td>3.6e-4±1.7e-4</td><td>0.013</td><td>0.009</td><td>×</td></tr><tr><td>E2ES-MCD (GAL &GHAHRAMANI, 2016)</td><td>-11.827</td><td>0.162±2.085</td><td>1.1e-4±7.3e-5</td><td>0.008</td><td>0.005</td><td>×</td></tr><tr><td>E2ES-DR (LAKSHMINARAYANAT EL., 2017)</td><td>-11.381</td><td>-0.958±1.875</td><td>1.3e-4±8.5e-5</td><td>0.007</td><td>0.005</td><td>×</td></tr><tr><td>DFB (PEREIRA ET AL., 2020)</td><td>2.905</td><td>6.023±3.110</td><td>8.7e-4±1.9e-4</td><td>0.019</td><td>0.018</td><td>✓</td></tr><tr><td>BNET (XIAO ET AL., 2023)</td><td>0.147</td><td>0.745±0.505</td><td>2.3e-4±1.2e-4</td><td>0.010</td><td>0.009</td><td>✓</td></tr><tr><td>BNET-UP (WANG ET AL., 2023B)</td><td>0.206</td><td>0.346±0.098</td><td>5.2e-5±3.2e-5</td><td>0.005</td><td>0.005</td><td>×</td></tr><tr><td>ABNET-10-SC (OURS)</td><td>0.233</td><td>0.570±0.360</td><td>5.9e-5±5.5e-5</td><td>0.006</td><td>0.005</td><td>✓</td></tr><tr><td>ABNET-10 (OURS)</td><td>0.039</td><td>0.272±0.443</td><td>1.2e-4±9.6e-5</td><td>0.008</td><td>0.007</td><td>✓</td></tr><tr><td>ABNET-100 (OURS)</td><td>0.053</td><td>0.123±0.177</td><td>1.1e-4±4.4e-5</td><td>0.005</td><td>0.004</td><td>✓</td></tr></table>
|
| 220 |
+
|
| 221 |
+
shown by the MSE metrics in Table 1 and the scalable training in Fig. 4. Note that our scalable training for ABNets has safety guarantees. The DFB tends to be very conservative as the CBFs within it are not differentiable, which leads to the high conservativeness value shown in Table 1. Our proposed ABNets can significantly reduce the uncertainty of the outputs (controls) under noisy input while guaranteeing safety, and this uncertainty decreases as the number of heads in the ABNets increases, as shown by the last two and three columns in Table 1, as well as shown in Fig. 4 and 8 of Appendix C.2 where the control uncertainty of ABNet-100 is lower than the one of BNet. The smoothness of the controls also increases with the increase of model heads (e.g., blue from ABNet v.s. red from BNet in Fig. 8). In terms of performance, our proposed ABNets can also improve the testing errors compared to BNet and DFB, as shown by the MSE in Table 1. The E2Es-MCD model can achieve the best performance, but this is at the cost of safety (the SAFETY metric in Table 1 is negative, which implies violated safety).
|
| 222 |
+
|
| 223 |
+
# 4.3. Safe Robot Manipulation
|
| 224 |
+
|
| 225 |
+
In robot manipulation, we employ a two-link planar robot manipulator to grasp an object from an arbitrary point to an arbitrary destination while avoiding crashing onto obstacles. All the models (h copies/heads) have the same input (with uniformly distributed noise, $10\%$ of the input magnitude in testing). We compare our proposed ABNets with the same benchmark models as in the last subsection. More detailed problem setup and model introductions are given in Appendix C.3.
|
| 226 |
+
|
| 227 |
+
Again, models/policies merging can improve the performance as shown by the MSE metrics in Table 2 and the scalable training in Fig. 5. All the E2E-related models are not robust to noise and violate safety constraints (i.e., crash onto obstacles) under noisy input since there are no formal guarantees, and such an example is shown by the magenta trajectory curve of the end-effector in Fig. 5. As shown in
|
| 228 |
+
|
| 229 |
+
Table 2, the proposed ABNet-100 model is the least conservative one with the lowest control uncertainties as well under noisy inputs (significantly improved compared with BNet and DFB), which demonstrates its advantage over other models. This uncertainty improvement is also shown by the control distributions in Fig. 9 in Appendix C.3 (BNet: red area v.s. ABNet-100: blue area). The BNet-UP achieves the best performance without safety guarantees.
|
| 230 |
+
|
| 231 |
+
# 4.4. Vision-based End-to-End Autonomous Driving
|
| 232 |
+
|
| 233 |
+
We finally test our models in a more complicated and realistic task: vision-based driving, using an open dataset and benchmark from VISTA (Amini et al., 2022). One of the ABNets, named ABNet-att, is constructed such that different heads focus on different parts of the image (left lane boundary, right lane boundary, etc.; the corresponding images are shown in Fig 10 of Appendix C.4). For more experiment and model details, please refer to Appendix C.4.
|
| 234 |
+
|
| 235 |
+
As shown in Table 3, the proposed ABNets can avoid crashing onto obstacles with a $100\%$ obstacle passing rate, including the ABNet-sc that is trained in a scalable way with two ABNets (also shown by the scalable training in Fig. 6). This is because the ABNets can learn the correct steering control (the blue and green sine waves shown in Fig. 11 (right) in Appendix C.4) to avoid the obstacle without stopping in front of it. Compared to the baseline MPC, the proposed ABNet is much more efficient (0.004s v.s. 0.872s). Although linearization is possible in MPC to improve the efficiency, it may make the MPC lose safety guarantees. The DFB and BNet-related models learn a significant deceleration control (shown in Fig. 11) to avoid crashing onto obstacles, which explains why the corresponding obstacle passing rates are low compared to other models in Table 3 and why the blue trajectories (BNet) terminate near the obstacle in Fig. 6 (upper). Nonetheless, there are still some crash cases in DFB and BNet models due to badly learned CBF parameters that make the inter-sampling effect (i.e., safety violation
|
| 236 |
+
|
| 237 |
+
Table 3: Vision-based end-to-end autonomous driving closed-loop testing and comparisons with benchmarks. The new metrics are abbreviated as obstacle crash rate (CRASH) and obstacle passing rate (PASS).
|
| 238 |
+
|
| 239 |
+
<table><tr><td>MODEL</td><td>CRASH(↓)</td><td>PASS(↑)</td><td>SAFETY(≥0)</td><td>CONSER.(≥0&↓)</td><td>u1UNCERTAINTY(↓)</td><td>u2UNCERTAINTY(↓)</td><td>THEORET.GUAR.</td></tr><tr><td>V-E2E (AMINI ET AL., 2022)</td><td>6%</td><td>94%</td><td>-60.297</td><td>-0.610±21.165</td><td>0.443</td><td>0.222</td><td>×</td></tr><tr><td>E2ES-MCD (GAL & GHAHRAMANI, 2016)</td><td>8%</td><td>92%</td><td>-60.566</td><td>-2.211±22.343</td><td>0.429</td><td>0.227</td><td>×</td></tr><tr><td>E2ES-DR (LAKSHMINARAYANAN ET AL., 2017)</td><td>9%</td><td>91%</td><td>-60.572</td><td>-1.499±21.500</td><td>0.431</td><td>0.224</td><td>×</td></tr><tr><td>DFB (PEREIRA ET AL., 2020)</td><td>4%</td><td>39%</td><td>-18.114</td><td>-0.828±5.444</td><td>0.513</td><td>0.125</td><td>✓</td></tr><tr><td>BNET (XIAO ET AL., 2023)</td><td>3%</td><td>33%</td><td>-16.694</td><td>-4.882±4.817</td><td>0.724</td><td>0.385</td><td>✓</td></tr><tr><td>BNET-UP (WANG ET AL., 2023B)</td><td>2%</td><td>35%</td><td>-23.252</td><td>-5.190±4.920</td><td>0.726</td><td>0.532</td><td>×</td></tr><tr><td>ABNET (OURS)</td><td>0%</td><td>100%</td><td>1.455</td><td>6.132±2.181</td><td>0.168</td><td>0.316</td><td>✓</td></tr><tr><td>ABNET-ATT (OURS)</td><td>0%</td><td>100%</td><td>4.198</td><td>8.053±1.449</td><td>0.172</td><td>0.269</td><td>✓</td></tr><tr><td>ABNET-SC (OURS)</td><td>0%</td><td>100%</td><td>2.221</td><td>7.224±1.667</td><td>0.130</td><td>0.256</td><td>✓</td></tr></table>
|
| 240 |
+
|
| 241 |
+
between discretized times) serious. Most importantly, our proposed ABNet can learn less uncertain controls for this complicated task, as shown in Table 3, the scalable training in Fig. 6, and Fig. 11 (e.g., ABNet:blue or ABNet-att:green area v.s. BNet: red area).
|
| 242 |
+
|
| 243 |
+

|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
Figure 6: Vision-based end-to-end autonomous driving closed-loop testing trajectories in VISTA (upper) and ABNet performance with the increasing of model heads using scalable training (lower). This scalable training is done by both the ABNet and ABNet-att in Table 3 with safety guarantees.
|
| 247 |
+
|
| 248 |
+
The ABNet-att can learn more consistent autonomous driving behavior than the ABNet due to the image attention
|
| 249 |
+
|
| 250 |
+
setting, as shown by the magenta (ABNet-att) and cyan (ABNet) trajectories in Fig. 6 (upper) and the green (ABNet-att) and blue (ABNet) areas in Fig. 11. Ablation studies on the robustness of our ABNets in terms of safety under high-noisy inputs (50% noise level) are given in Table 4 of Appendix C.4.
|
| 251 |
+
|
| 252 |
+
# 5. Related Works
|
| 253 |
+
|
| 254 |
+
Scalability, merging and uncertainty in safe robot learning. Machine learning has been widely used in robot control (Bommasani et al., 2021) (Singh et al., 2023) (Wang et al., 2023a). However, there is increasing concern about machine learning, especially large foundation models, being used in robotics (Bommasani et al., 2021). Mixture-of-experts methods (Shazeer et al., 2017) (Riquelme et al., 2021) (Zhou et al., 2022) are scalable, but it is hard for them to retain properties (such as safety) of the models. The uncertainty resulting from noisy model inputs or datasets prevents deployment to real robots (Loquercio et al., 2020) (Kahn et al., 2017). To address this, predictive uncertainty quantification (Gal & Ghahramani, 2016) (Lakshminarayanan et al., 2017), which is also a model merging approach, has been widely adopted. It has been shown to work well in vision-based autonomous driving under noisy input (Wang et al., 2023b) using the Gaussian kernel with Scott's rule (Scott, 2015) to select the bandwidth. The main challenge of this technique is that it may make the system lose performance guarantees, such as safety. Other model merging approaches (Huang et al., 2023) (Ramé et al., 2023) (Wang et al., 2024) do not preserve safety either. We address the uncertainty and scalability problems using the proposed ABNets with provable safety.
|
| 255 |
+
|
| 256 |
+
CBFs and set invariance. In control theory, the set invariance has been widely adopted to prove and enforce the safety of dynamical systems (Blanchini, 1999) (Rakovic et al., 2005) (Ames et al., 2017) (Xiao & Belta, 2021) (Xiao
|
| 257 |
+
|
| 258 |
+
et al., 2023). The Control Barrier Function (CBF) (Ames et al., 2017) (Xiao & Belta, 2021) is a state-of-the-art technique that can enforce set invariance (Aubin, 2009), (Prajna et al., 2007), (Wisniewski & Sloth, 2013), and it transforms a nonlinear optimization problem into a quadratic program that is very efficient to solve. CBFs originate from barrier functions that were originally used in optimization problems (Boyd & Vandenberghe, 2004). However, the CBF tends to make the system conservative (i.e., at the cost of performance) in order to enforce safety, and it is not scalable for building large models. Our proposed ABNet can address all these limitations.
|
| 259 |
+
|
| 260 |
+
Safety in neural networks. Safety is usually enforced using optimizations. Barrier functions have been widely used in safe Reinforcement Learning (RL) (Tessler et al., 2018; Achiam et al., 2017). However, safety cannot be guaranteed in safe RL as the barrier functions are used as part of the reward function (a soft constraint). Recently, differentiable optimizations have shown great potential for learning-based control with safety guarantees (Pereira et al., 2020; Amos et al., 2018; Xiao et al., 2023; Liu et al., 2023). The quadratic program (QP) can be employed as a layer in a neural network, i.e., the OptNet (Amos & Kolter, 2017). The OptNet has been used with CBFs in neural networks as a safety filter for controls (Pereira et al., 2020), in which the CBFs themselves are not trainable, which can significantly limit the learning capability. Neural network controllers with safety certificates have been learned through verification-in-the-loop training (Deshmukh et al., 2019; Zhao et al., 2021; Ferlez et al., 2020). However, the verification method cannot guarantee coverage of the whole state space, and it is also very computationally expensive. None of these methods are scalable to larger models, and all are subject to uncertainty, which the proposed ABNet can address.
|
| 261 |
+
|
| 262 |
+
# 6. Conclusions, Limitations and Future Work
|
| 263 |
+
|
| 264 |
+
We propose Adaptive explicit-Barrier Net (ABNet) that merges many safety-critical learning models while preserving the safety in this paper. The proposed ABNet is efficient to train, scalable to build larger safe learning models, can achieve better performance, and is robust to input noise. We have demonstrated the effectiveness of the model on a series of robot control tasks. Nonetheless, our model (and all the other barrier-based learning models (Ferlez et al., 2020) (Xiao et al., 2023)) still have a few limitations motivating for further research.
|
| 265 |
+
|
| 266 |
+
Limitations. First, all the ABNets have the same safety constraints. We will explore how to merge ABNets with different safety constraints in the future. Second, the ABNet also requires safety specifications that may be unknown in some robot control tasks; in such cases, we may learn the safety specifications from data (Robey et al., 2020), (Srinivasan et al.,
|
| 267 |
+
|
| 268 |
+
2020), and this can also be done in conjunction with ABNet. Third, the model merging is done in the output space, future work will further focus on model merging with safety guarantees in the parameter space. Finally, we will apply the proposed model in environments that involve contact handling, such as grasping.
|
| 269 |
+
|
| 270 |
+
# Acknowledgements
|
| 271 |
+
|
| 272 |
+
The research was supported in part by Capgemini Engineering. It was also partially sponsored by the United States Air Force Research Laboratory and the United States Air Force Artificial Intelligence Accelerator and was accomplished under Cooperative Agreement Number FA8750-19-2-1000. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the United States Air Force or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notation herein. This research was also supported in part by the AI2050 program at Schmidt Futures (Grant G-965 22-63172), and by the ONR Science of Autonomy program N00014-23-1-2354.
|
| 273 |
+
|
| 274 |
+
# Impact Statement
|
| 275 |
+
|
| 276 |
+
This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.
|
| 277 |
+
|
| 278 |
+
# References
|
| 279 |
+
|
| 280 |
+
Achiam, J., Held, D., Tamar, A., and Abbeel, P. Constrained policy optimization. In International conference on machine learning, pp. 22-31. PMLR, 2017.
|
| 281 |
+
Agarwal, N., Brukhim, N., Hazan, E., and Lu, Z. Boosting for control of dynamical systems. In International Conference on Machine Learning, pp. 96-103. PMLR, 2020.
|
| 282 |
+
Ames, A. D., Xu, X., Grizzle, J. W., and Tabuada, P. Control barrier function based quadratic programs for safety critical systems. IEEE Transactions on Automatic Control, 62(8):3861-3876, 2017.
|
| 283 |
+
Amini, A., Wang, T.-H., Gilitschenski, I., Schwarting, W., Liu, Z., Han, S., Karaman, S., and Rus, D. Vista 2.0: An open, data-driven simulator for multimodal sensing and policy learning for autonomous vehicles. In 2022 International Conference on Robotics and Automation (ICRA), pp. 2419-2426. IEEE, 2022.
|
| 284 |
+
Amos, B. and Kolter, J. Z. Optnet: Differentiable optimization
|
| 285 |
+
|
| 286 |
+
tion as a layer in neural networks. In Proceedings of the 34th International Conference on Machine Learning - Volume 70, pp. 136-145, 2017.
|
| 287 |
+
Amos, B., Rodriguez, I. D. J., Sacks, J., Boots, B., and Kolter, J. Z. Differentiable mpc for end-to-end planning and control. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, pp. 8299-8310. Curran Associates Inc., 2018.
|
| 288 |
+
Aubin, J.-P. Viability theory. Springer, 2009.
|
| 289 |
+
Beygelzimer, A., Hazan, E., Kale, S., and Luo, H. Online gradient boosting. Advances in neural information processing systems, 28, 2015.
|
| 290 |
+
Blanchini, F. Set invariance in control. Automatica, 35(11): 1747-1767, 1999.
|
| 291 |
+
Bommasani, R., Hudson, D. A., Adeli, E., Altman, R., Arora, S., von Arx, S., Bernstein, M. S., Bohg, J., Bosse-lut, A., Brunskill, E., et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021.
|
| 292 |
+
Boyd, S. P. and Vandenberghe, L. Convex optimization. Cambridge university press, New York, 2004.
|
| 293 |
+
Deshmukh, J. V., Kapinski, J. P., Yamaguchi, T., and Prokhorov, D. Learning deep neural network controllers for dynamical systems with safety guarantees: Invited paper. In 2019 IEEE/ACM International Conference on Computer-Aided Design (ICCAD), pp. 1-7, 2019.
|
| 294 |
+
Ferlez, J., Elnaggar, M., Shoukry, Y., and Fleming, C. Shieldnn: A provably safe nn filter for unsafe nn controllers. preprint arXiv:2006.09564, 2020.
|
| 295 |
+
Gal, Y. and Ghahramani, Z. Dropout as a bayesian approximation: Representing model uncertainty in deep learning. In international conference on machine learning, pp. 1050-1059. PMLR, 2016.
|
| 296 |
+
Glotfelter, P., Cortes, J., and Egerstedt, M. Nonsmooth barrier functions with applications to multi-robot systems. IEEE control systems letters, 1(2):310-315, 2017.
|
| 297 |
+
Huang, C., Liu, Q., Lin, B. Y., Pang, T., Du, C., and Lin, M. Lorahub: Efficient cross-task generalization via dynamic lora composition. arXiv preprint arXiv:2307.13269, 2023.
|
| 298 |
+
Kahn, G., Villaflor, A., Pong, V., Abbeel, P., and Levine, S. Uncertainty-aware reinforcement learning for collision avoidance. arXiv preprint arXiv:1702.01182, 2017.
|
| 299 |
+
Lakshminarayanan, B., Pritzel, A., and Blundell, C. Simple and scalable predictive uncertainty estimation using deep ensembles. Advances in neural information processing systems, 30, 2017.
|
| 300 |
+
|
| 301 |
+
Levine, S., Finn, C., Darrell, T., and Abbeel, P. End-to-end training of deep visuomotor policies. Journal of Machine Learning Research, 17(39):1-40, 2016.
|
| 302 |
+
Li, J., Li, D., Xiong, C., and Hoi, S. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pp. 12888-12900. PMLR, 2022.
|
| 303 |
+
Liu, W., Xiao, W., and Belta, C. Learning robust and correct controllers from signal temporal logic specifications using barriernet. In 2023 62nd IEEE Conference on Decision and Control (CDC), pp. 7049-7054. IEEE, 2023.
|
| 304 |
+
Loquercio, A., Segu, M., and Scaramuzza, D. A general framework for uncertainty estimation in deep learning. IEEE Robotics and Automation Letters, 5(2):3153-3160, 2020.
|
| 305 |
+
Luenberger, D. G. Optimization by vector space methods. John Wiley & Sons, 1997.
|
| 306 |
+
Nagumo, M. Über die Lage der Integralkurven gewöhnlicher Differentialgleichungen. In Proceedings of the Physico-Mathematical Society of Japan, 3rd Series, 24:551-559, 1942.
|
| 307 |
+
Pereira, M. A., Wang, Z., Exarchos, I., and Theodorou, E. A. Safe optimal control using stochastic barrier functions and deep forward-backward sdes. In Conference on Robot Learning, 2020.
|
| 308 |
+
Prajna, S., Jadbabaie, A., and Pappas, G. J. A framework for worst-case and stochastic safety verification using barrier certificates. IEEE Transactions on Automatic Control, 52 (8):1415-1428, 2007.
|
| 309 |
+
Rakovic, S. V., Kerrigan, E. C., Kouramas, K. I., and Mayne, D. Q. Invariant approximations of the minimal robust positively invariant set. IEEE Transactions on automatic control, 50(3):406-410, 2005.
|
| 310 |
+
Rame, A., Ahuja, K., Zhang, J., Cord, M., Bottou, L., and Lopez-Paz, D. Model ratatouille: Recycling diverse models for out-of-distribution generalization. In International Conference on Machine Learning, pp. 28656-28679. PMLR, 2023.
|
| 311 |
+
Riquelme, C., Puigcerver, J., Mustafa, B., Neumann, M., Jenatton, R., Susano Pinto, A., Keysers, D., and Houlsby, N. Scaling vision with sparse mixture of experts. Advances in Neural Information Processing Systems, 34: 8583-8595, 2021.
|
| 312 |
+
Robey, A., Hu, H., Lindemann, L., Zhang, H., Dimarogonas, D. V., Tu, S., and Matni, N. Learning control barrier functions from expert demonstrations. In 2020 59th IEEE
|
| 313 |
+
|
| 314 |
+
Conference on Decision and Control (CDC), pp. 3717-3724, 2020.
|
| 315 |
+
Rucco, A., Notarstefano, G., and Hauser, J. An efficient minimum-time trajectory generation strategy for two-track car vehicles. IEEE Transactions on Control Systems Technology, 23(4):1505-1519, 2015.
|
| 316 |
+
Scott, D. W. Multivariate density estimation: theory, practice, and visualization. John Wiley & Sons, 2015.
|
| 317 |
+
Shazeer, N., Mirhoseini, A., Maziarz, K., Davis, A., Le, Q., Hinton, G., and Dean, J. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538, 2017.
|
| 318 |
+
Singh, I., Blukis, V., Mousavian, A., Goyal, A., Xu, D., Tremblay, J., Fox, D., Thomason, J., and Garg, A. Progprompt: Generating situated robot task plans using large language models. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 11523-11530. IEEE, 2023.
|
| 319 |
+
Srinivasan, M., Dabholkar, A., Coogan, S., and Vela, P. A. Synthesis of control barrier functions using a supervised machine learning approach. In 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 7139-7145, 2020.
|
| 320 |
+
Tessler, C., Mankowitz, D. J., and Mannor, S. Reward constrained policy optimization. arXiv preprint arXiv:1805.11074, 2018.
|
| 321 |
+
Wang, L., Zhao, J., Du, Y., Adelson, E. H., and Tedrake, R. Poco: Policy composition from and for heterogeneous robot learning. arXiv preprint arXiv:2402.02511, 2024.
|
| 322 |
+
Wang, T.-H., Maalouf, A., Xiao, W., Ban, Y., Amini, A., Rosman, G., Karaman, S., and Rus, D. Drive anywhere: Generalizable end-to-end autonomous driving with multi-modal foundation models. arXiv preprint arXiv:2310.17642, 2023a.
|
| 323 |
+
Wang, T.-H., Xiao, W., Chahine, M., Amini, A., Hasani, R., and Rus, D. Learning stability attention in vision-based end-to-end driving policies. In Proceedings of The 5th Annual Learning for Dynamics and Control Conference, volume 211 of Proceedings of Machine Learning Research, pp. 1099-1111. PMLR, 15-16 Jun 2023b.
|
| 324 |
+
Wisniewski, R. and Sloth, C. Converse barrier certificate theorem. In Proc. of 52nd IEEE Conference on Decision and Control, pp. 4713-4718, Florence, Italy, 2013.
|
| 325 |
+
Xiao, W. and Belta, C. High-order control barrier functions. IEEE Transactions on Automatic Control, 67(7):3655-3662, 2021.
|
| 326 |
+
|
| 327 |
+
Xiao, W., Belta, C., and Cassandras, C. G. Adaptive control barrier functions. IEEE Transactions on Automatic Control, 67(5):2267-2281, 2021.
|
| 328 |
+
Xiao, W., Wang, T.-H., Hasani, R., Chahine, M., Amini, A., Li, X., and Rus, D. Barriernet: Differentiable control barrier functions for learning of safe robot control. IEEE Transactions on Robotics, 39(3):2289-2307, 2023.
|
| 329 |
+
Zhao, H., Zeng, X., Chen, T., Liu, Z., and Woodcock, J. Learning safe neural network controllers with barrier certificates. Form Asp Comp, 33:437-455, 2021.
|
| 330 |
+
Zhou, Y., Lei, T., Liu, H., Du, N., Huang, Y., Zhao, V., Dai, A. M., Le, Q. V., Laudon, J., et al. Mixture-of-experts with expert choice routing. Advances in Neural Information Processing Systems, 35:7103-7114, 2022.
|
| 331 |
+
|
| 332 |
+
# A. Closed-form Solution of the Explicit-Barrier
|
| 333 |
+
|
| 334 |
+
Here, we show the process of deriving the closed-form solution of the explicit-Barrier following (Luenberger, 1997) (Ames et al., 2017).
|
| 335 |
+
|
| 336 |
+
Similarly as in (3), we consider the following optimization (with the two specifications $b_{I}(x), b_{II}(x)$ shown in the main text) corresponding to the explicit-Barrier:
|
| 337 |
+
|
| 338 |
+
$$
|
| 339 |
+
\boldsymbol {u} _ {k} = \arg \min _ {\boldsymbol {u} (t)} \frac {1}{2} \boldsymbol {u} (t) ^ {T} H \left(\boldsymbol {z} _ {k} \mid \theta_ {h, k}\right) \boldsymbol {u} (t) + F ^ {T} \left(\boldsymbol {z} _ {k} \mid \theta_ {f, k}\right) \boldsymbol {u} (t) \tag {7}
|
| 340 |
+
$$
|
| 341 |
+
|
| 342 |
+
s.t.
|
| 343 |
+
|
| 344 |
+
$$
|
| 345 |
+
L _ {f} \psi_ {I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + \left[ L _ {g} \psi_ {I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \right] \boldsymbol {u} + p _ {m, k} \left(\boldsymbol {z} _ {k} | \theta_ {p, k} ^ {m}\right) \alpha_ {I, m} \left(\psi_ {I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p})\right) \geq 0,
|
| 346 |
+
$$
|
| 347 |
+
|
| 348 |
+
$$
|
| 349 |
+
L _ {f} \psi_ {I I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \boldsymbol {\theta} _ {p}) + \left[ L _ {g} \psi_ {I I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \boldsymbol {\theta} _ {p}) \right] \boldsymbol {u} + p _ {m, k} \left(\boldsymbol {z} _ {k} | \boldsymbol {\theta} _ {p, k} ^ {m}\right) \alpha_ {I I, m} \left(\psi_ {I I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \boldsymbol {\theta} _ {p})\right) \geq 0,
|
| 350 |
+
$$
|
| 351 |
+
|
| 352 |
+
We first define
|
| 353 |
+
|
| 354 |
+
$$
|
| 355 |
+
g _ {1} (\boldsymbol {x}) = \left[ - L _ {g} \psi_ {I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \right], \quad h _ {1} (\boldsymbol {x}) = L _ {f} \psi_ {I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + p _ {m, k} \left(\boldsymbol {z} _ {k} \mid \theta_ {p, k} ^ {m}\right) \alpha_ {I, m} \left(\psi_ {I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p})\right), \tag {8}
|
| 356 |
+
$$
|
| 357 |
+
|
| 358 |
+
$$
|
| 359 |
+
g _ {2} (\boldsymbol {x}) = \left[ - L _ {g} \psi_ {I I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \right], \quad h _ {2} (\boldsymbol {x}) = L _ {f} \psi_ {I I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + p _ {m, k} (z _ {k} | \theta_ {p, k} ^ {m}) \alpha_ {I I, m} (\psi_ {I I, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p})).
|
| 360 |
+
$$
|
| 361 |
+
|
| 362 |
+
Since the matrix $H$ in the above optimization (7) is positive definite, we then define
|
| 363 |
+
|
| 364 |
+
$$
|
| 365 |
+
[ \hat {g} _ {1} (\boldsymbol {x}), \hat {g} _ {2} (\boldsymbol {x}) ] = H \left(\boldsymbol {z} _ {k} \mid \theta_ {h, k}\right) ^ {- 1} \left[ g _ {1} (\boldsymbol {x}), g _ {2} (\boldsymbol {x}) \right],
|
| 366 |
+
$$
|
| 367 |
+
|
| 368 |
+
$$
|
| 369 |
+
\left[ \begin{array}{l} \hat {h} _ {1} (\boldsymbol {x}) \\ \hat {h} _ {2} (\boldsymbol {x}) \end{array} \right] = \left[ \begin{array}{l} h _ {1} (\boldsymbol {x}) \\ h _ {2} (\boldsymbol {x}) \end{array} \right] - \left[ \begin{array}{l} g _ {1} (\boldsymbol {x}) ^ {T} \\ g _ {2} (\boldsymbol {x}) ^ {T} \end{array} \right] \hat {\boldsymbol {u}} _ {k} \tag {9}
|
| 370 |
+
$$
|
| 371 |
+
|
| 372 |
+
where
|
| 373 |
+
|
| 374 |
+
$$
|
| 375 |
+
\hat {\boldsymbol {u}} _ {k} = - H \left(\boldsymbol {z} _ {k} \mid \theta_ {h, k}\right) ^ {- 1} F \left(\boldsymbol {z} _ {k} \mid \theta_ {f, k}\right). \tag {10}
|
| 376 |
+
$$
|
| 377 |
+
|
| 378 |
+
Next, let $\pmb{v}_k \coloneqq \pmb{u}_k - \hat{\pmb{u}}_k$ and $\langle \cdot, \cdot \rangle$ define an inner product with weight matrix $H(\pmb{z}_k | \theta_{h,k})$ so that $\langle \pmb{v}_k, \pmb{v}_k \rangle = (\pmb{v}_k)^T H(\pmb{z}_k | \theta_{h,k}) \pmb{v}_k$ . The optimization problem (7) is equivalent to:
|
| 379 |
+
|
| 380 |
+
$$
|
| 381 |
+
\boldsymbol {v} _ {k} ^ {*} = \underset {\boldsymbol {v} _ {k}} {\arg \min } \langle \boldsymbol {v} _ {k}, \boldsymbol {v} _ {k} \rangle ,
|
| 382 |
+
$$
|
| 383 |
+
|
| 384 |
+
$$
|
| 385 |
+
\text{s.t.}\ \ \langle \hat {g} _ {1} (\boldsymbol {x}), \boldsymbol {v} _ {k} \rangle \leq \hat {h} _ {1} (\boldsymbol {x}), \tag {11}
|
| 386 |
+
$$
|
| 387 |
+
|
| 388 |
+
$$
|
| 389 |
+
\langle \hat {g} _ {2} (\boldsymbol {x}), \boldsymbol {v} _ {k} \rangle \leq \hat {h} _ {2} (\boldsymbol {x}).
|
| 390 |
+
$$
|
| 391 |
+
|
| 392 |
+
Finally, we have that the optimal solution of (7) is given by
|
| 393 |
+
|
| 394 |
+
$$
|
| 395 |
+
\boldsymbol {u} _ {k} = \boldsymbol {v} _ {k} ^ {*} + \hat {\boldsymbol {u}} _ {k}. \tag {12}
|
| 396 |
+
$$
|
| 397 |
+
|
| 398 |
+
Let $G(\pmb{x}) = [G_{ij}(\pmb{x})] = [\langle \hat{g}_i(\pmb{x}),\hat{g}_j(\pmb{x})\rangle ],\ i,j = 1,2$ be the Gram matrix. Following (Luenberger, 1997) [Ch. 3], the unique solution $\pmb{v}_k^*$ to (11) is given by
|
| 399 |
+
|
| 400 |
+
$$
|
| 401 |
+
\boldsymbol {v} _ {k} ^ {*} = \lambda_ {1} (\boldsymbol {x}) \hat {g} _ {1} (\boldsymbol {x}) + \lambda_ {2} (\boldsymbol {x}) \hat {g} _ {2} (\boldsymbol {x}) \tag {13}
|
| 402 |
+
$$
|
| 403 |
+
|
| 404 |
+
where the two gate functions $\lambda_1(\pmb{x}),\lambda_2(\pmb{x})$ are given by:
|
| 405 |
+
|
| 406 |
+
$$
|
| 407 |
+
\lambda_{1}(\boldsymbol{x}) = \left\{ \begin{array}{ll} 0 & \text{if } G_{21}(\boldsymbol{x}) \max\left(\hat{h}_{2}(\boldsymbol{x}), 0\right) - G_{22}(\boldsymbol{x}) \hat{h}_{1}(\boldsymbol{x}) < 0 \\ \frac{\max\left(\hat{h}_{1}(\boldsymbol{x}), 0\right)}{G_{11}(\boldsymbol{x})} & \text{if } G_{12}(\boldsymbol{x}) \max\left(\hat{h}_{1}(\boldsymbol{x}), 0\right) - G_{11}(\boldsymbol{x}) \hat{h}_{2}(\boldsymbol{x}) < 0 \\ \frac{\max\left(G_{22}(\boldsymbol{x}) \hat{h}_{1}(\boldsymbol{x}) - G_{21}(\boldsymbol{x}) \hat{h}_{2}(\boldsymbol{x}), 0\right)}{G_{11}(\boldsymbol{x}) G_{22}(\boldsymbol{x}) - G_{12}(\boldsymbol{x}) G_{21}(\boldsymbol{x})} & \text{otherwise.} \end{array} \right. \tag{14}
|
| 408 |
+
$$
|
| 409 |
+
|
| 410 |
+
$$
|
| 411 |
+
\lambda_{2}(\boldsymbol{x}) = \left\{ \begin{array}{ll} \frac{\max\left(\hat{h}_{2}(\boldsymbol{x}), 0\right)}{G_{22}(\boldsymbol{x})} & \text{if } G_{21}(\boldsymbol{x}) \max\left(\hat{h}_{2}(\boldsymbol{x}), 0\right) - G_{22}(\boldsymbol{x}) \hat{h}_{1}(\boldsymbol{x}) < 0 \\ 0 & \text{if } G_{12}(\boldsymbol{x}) \max\left(\hat{h}_{1}(\boldsymbol{x}), 0\right) - G_{11}(\boldsymbol{x}) \hat{h}_{2}(\boldsymbol{x}) < 0 \\ \frac{\max\left(G_{11}(\boldsymbol{x}) \hat{h}_{2}(\boldsymbol{x}) - G_{12}(\boldsymbol{x}) \hat{h}_{1}(\boldsymbol{x}), 0\right)}{G_{11}(\boldsymbol{x}) G_{22}(\boldsymbol{x}) - G_{12}(\boldsymbol{x}) G_{21}(\boldsymbol{x})} & \text{otherwise.} \end{array} \right. \tag{15}
|
| 412 |
+
$$
|
| 413 |
+
|
| 414 |
+
# B. Proof of Theorems
|
| 415 |
+
|
| 416 |
+
Theorem 3.1. (Safety of ABNets) Given the multi-head ABNet formulated as in (4) and all other safe learning models (BarrierNet, dMPC, etc.). If the system is initially safe (i.e., $b_{j}(\boldsymbol{x}(t_{0})) \geq 0, \forall j \in S$ ), then a control policy $\boldsymbol{u}$ from the ABNet output (5) guarantees the safety of system, i.e., $b_{j}(\boldsymbol{x}(t)) \geq 0, \forall j \in S, \forall t \geq t_{0}$ .
|
| 417 |
+
|
| 418 |
+
Proof: The proof outline is to first show the existence of new HOCBF constraints (corresponding to all the safety specifications) that are defined over the output of the ABNet. Then, we can use Nagumo's theorem (Nagumo, 1942) to recursively show the forward invariance of each safety set in the HOCBFs, and this can eventually imply the satisfaction of the safety specifications $b_{j}(\boldsymbol{x}) \geq 0, \forall j \in S$ .
|
| 419 |
+
|
| 420 |
+
First, we show how we may ensure the safety of ABNet when there are other safe learning models, such as BarrierNet, dMPC, etc. Given a safe learning model, we have that $b_{j}(\boldsymbol{x}) \geq 0, \forall j \in S$ . By the adaptive CBF theorem (Xiao et al., 2021), we have that the satisfaction of the adaptive CBF constraint is a necessary and sufficient condition for the safety of the system. In other words, $b_{j}(\boldsymbol{x}) \geq 0, \forall j \in S$ implies that there exists an adaptive CBF:
|
| 421 |
+
|
| 422 |
+
$$
|
| 423 |
+
L _ {f} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + \left[ L _ {g} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \right] \boldsymbol {u} _ {k} + p _ {m, k} \left(\boldsymbol {z} _ {k} | \theta_ {p, k} ^ {m}\right) \alpha_ {j, m} \left(\psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p})\right) \geq 0, j \in S, \tag {16}
|
| 424 |
+
$$
|
| 425 |
+
|
| 426 |
+
where $p_{m,k}(\boldsymbol{z}_k|\boldsymbol{\theta}_{p,k}^m) > 0$ is the penalty (adaptive) function, and $\psi_{j,m-1}(\boldsymbol{x},\boldsymbol{z}|\boldsymbol{\theta}_p)$ is defined as in (4).
|
| 427 |
+
|
| 428 |
+
Next, we consider the explicit-Barrier model. As shown in Appendix sec. A, the explicit-barrier (4) is the exact solution of the QP (7). The solution of the QP (7) further implies the satisfaction of $b_{I}(\pmb{x}) \geq 0, b_{II}(\pmb{x}) \geq 0$ by the HOCBF theory (Xiao & Belta, 2021), which is equivalent to have that $b_{j}(\pmb{x}) \geq 0, \forall j \in S$ (shown right before (4)). Again, by the adaptive CBF theorem, we have that there exist adaptive CBFs as in the form of (16).
|
| 429 |
+
|
| 430 |
+
Finally, we only need to consider the case of fusing controllers $\pmb{u}_k, k \in \{1, \dots, h\}$ that satisfy the following:
|
| 431 |
+
|
| 432 |
+
$$
|
| 433 |
+
L _ {f} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + \left[ L _ {g} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \right] \boldsymbol {u} _ {k} + p _ {m, k} \left(\boldsymbol {z} _ {k} | \theta_ {p, k} ^ {m}\right) \alpha_ {j, m} \left(\psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p})\right) \geq 0, j \in S, \tag {17}
|
| 434 |
+
$$
|
| 435 |
+
|
| 436 |
+
Multiplying the last equation by the weight $w_{k} \geq 0$, we have
|
| 437 |
+
|
| 438 |
+
$$
|
| 439 |
+
w _ {k} L _ {f} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + w _ {k} \left[ L _ {g} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \right] \boldsymbol {u} _ {k} + w _ {k} p _ {m, k} \left(z _ {k} | \theta_ {p, k} ^ {m}\right) \alpha_ {j, m} \left(\psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p})\right) \geq 0, j \in S, \tag {18}
|
| 440 |
+
$$
|
| 441 |
+
|
| 442 |
+
Taking a summation of the last equation over all $k \in \{1, \dots, h\}$, the following holds:
|
| 443 |
+
|
| 444 |
+
$$
|
| 445 |
+
\begin{array}{l} \sum_{k=1}^{h} w_k L_f \psi_{j,m-1}(\boldsymbol{x}, \boldsymbol{z} | \theta_p) + \sum_{k=1}^{h} w_k \left[ L_g \psi_{j,m-1}(\boldsymbol{x}, \boldsymbol{z} | \theta_p) \right] \boldsymbol{u}_k \\ + \sum_{k=1}^{h} w_k p_{m,k}\left(\boldsymbol{z}_k \mid \theta_{p,k}^m\right) \alpha_{j,m}\left(\psi_{j,m-1}(\boldsymbol{x}, \boldsymbol{z} \mid \theta_p)\right) \geq 0, \quad j \in S, \end{array} \tag{19}
|
| 446 |
+
$$
|
| 447 |
+
|
| 448 |
+
Since $L_{g}\psi_{j,m - 1}(\pmb {x},\pmb {z}|\theta_{p})$ is a vector that is independent of $k$ and $\sum_{k = 1}^{h}w_{k} = 1$ , the last equation can be rewritten as:
|
| 449 |
+
|
| 450 |
+
$$
|
| 451 |
+
\begin{array}{l} L _ {f} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) + L _ {g} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \left(\sum_ {k = 1} ^ {h} w _ {k} \boldsymbol {u} _ {k}\right) \\ + \sum_ {k = 1} ^ {h} w _ {k} p _ {m, k} \left(\boldsymbol {z} _ {k} \mid \boldsymbol {\theta} _ {p, k} ^ {m}\right) \alpha_ {j, m} \left(\psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} \mid \boldsymbol {\theta} _ {p})\right) \geq 0, j \in S, \tag {20} \\ \end{array}
|
| 452 |
+
$$
|
| 453 |
+
|
| 454 |
+
The summation of class $\mathcal{H}$ functions is also a class $\mathcal{H}$ function. Since $\alpha_{j,m}$ are class $\mathcal{H}$ functions, $\sum_{k=1}^{h} w_k p_{m,k} (\pmb{z}_k | \theta_{p,k}^m) \alpha_{j,m} (\psi_{j,m-1}(\pmb{x}, \pmb{z} | \theta_p))$ is also a class $\mathcal{H}$ function of $\psi_{j,m-1}(\pmb{x}, \pmb{z} | \theta_p)$ . Therefore, equations (20) are the new HOCBF constraints defined over the output of the ABNet, i.e., $\sum_{k=1}^{h} w_k \pmb{u}_k$ . In other words, whenever $\psi_{j,m-1}(\pmb{x}, \pmb{z} | \theta_p) = 0$ , we have
|
| 455 |
+
|
| 456 |
+
$$
|
| 457 |
+
L _ {f} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \boldsymbol {\theta} _ {p}) + L _ {g} \psi_ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \boldsymbol {\theta} _ {p}) \left(\sum_ {k = 1} ^ {h} w _ {k} \boldsymbol {u} _ {k}\right) \geq 0, j \in S, \tag {21}
|
| 458 |
+
$$
|
| 459 |
+
|
| 460 |
+
The controls (outputs of the ABNet) $\sum_{k=1}^{h} w_k u_k \equiv u$ are directly used to drive the system, and $z$ is taken as a piece-wise constant within discretized time intervals (Xiao et al., 2023). Therefore, the last equation can be rewritten as
|
| 461 |
+
|
| 462 |
+
$$
|
| 463 |
+
\frac {\partial \psi_ {j , m - 1} (\boldsymbol {x} , \boldsymbol {z} | \theta_ {p})}{\partial \boldsymbol {x}} (f (\boldsymbol {x}) + g (\boldsymbol {x}) \boldsymbol {u}) = \frac {\partial \psi_ {j , m - 1} (\boldsymbol {x} , \boldsymbol {z} | \theta_ {p})}{\partial \boldsymbol {x}} \dot {\boldsymbol {x}} = \dot {\psi} _ {j, m - 1} (\boldsymbol {x}, \boldsymbol {z} | \theta_ {p}) \geq 0, j \in S, \tag {22}
|
| 464 |
+
$$
|
| 465 |
+
|
| 466 |
+
Since $b_{j}(\pmb{x}(t_{0})) \geq 0$ , we can always initialize the HOCBF definition such that $\dot{\psi}_{j,m-1}(\pmb{x},\pmb{z}|\theta_{p}) \geq 0$ is satisfied at $t_{0}$ (Xiao & Belta, 2021). By Nagumo's theorem (Nagumo, 1942) and (20)-(22), we have that $\psi_{j,m-1}(\pmb{x},\pmb{z}|\theta_{p}) \geq 0, \forall t \geq t_{0}$ .
|
| 467 |
+
|
| 468 |
+
Recursively, we can show that $\psi_{j,i}(\pmb{x},\pmb{z}|\theta_p) \geq 0, \forall t \geq t_0, \forall i \in \{0,\dots,m-1\}$ from $i = m-1$ to $i = 0$ . Since $b_j(\pmb{x}) = \psi_{j,0}(\pmb{x},\pmb{z}|\theta_p)$ by (3), we have that $b_j(\pmb{x}(t)) \geq 0, \forall t \geq t_0, \forall j \in S$ , which establishes the safety guarantees of the ABNet for the system.
|
| 469 |
+
|
| 470 |
+
Theorem 3.2. (Safety of merging of ABNets) Given two ABNets, the merged model using the form as in (5) again guarantees the safety of the system.
|
| 471 |
+
|
| 472 |
+
Proof: The proof outline is similar to that of Theorem 3.1. From each ABNet, we can show the existence of new HOCBF constraints (corresponding to all the safety specifications) that are defined over the output of each ABNet. Then we can again show the existence of another set of new HOCBF constraints (corresponding to all the safety specifications) that are defined over the output of the merged ABNet. Finally, we can also use Nagumo's theorem (Nagumo, 1942) to recursively show the forward invariance of each safety set in the HOCBFs, and this can eventually imply the satisfaction of the safety specifications $b_{j}(\boldsymbol{x}) \geq 0, \forall j \in S$ .
|
| 473 |
+
|
| 474 |
+
The mathematical proof is similar to that of Theorem 3.1, and thus is omitted.
|
| 475 |
+
|
| 476 |
+
# C. Experiment Details
|
| 477 |
+
|
| 478 |
+
Metrics used in all the tables. The SAFETY metric is defined as:
|
| 479 |
+
|
| 480 |
+
$$
|
| 481 |
+
\text {SAFETY} = \min _ {k} \left\{\min _ {t \in [ t _ {0}, T ]} b (\boldsymbol {x} (t)) \right\} _ {k}, k \in \{1, \dots , N \}, \tag {23}
|
| 482 |
+
$$
|
| 483 |
+
|
| 484 |
+
where $N$ is the number of testing runs ( $N = 100$ in this case). $T$ is the final time of each run. $b(\pmb{x}) \geq 0$ is the safety constraint that is given explicitly in each experiment below.
|
| 485 |
+
|
| 486 |
+
The CONSER. metric is defined as
|
| 487 |
+
|
| 488 |
+
$$
|
| 489 |
+
\begin{array}{l} \text {CONSER. mean} = \underset {k} {\operatorname {mean}} \left\{\min _ {t \in [ t _ {0}, T ]} b (\boldsymbol {x} (t)) \right\} _ {k}, k \in \{1, \dots , N \}, \\ \text {CONSER. std} = \underset {k} {\operatorname {std}} \left\{\min _ {t \in [ t _ {0}, T ]} b (\boldsymbol {x} (t)) \right\} _ {k}, k \in \{1, \dots , N \}. \tag {24} \\ \end{array}
|
| 490 |
+
$$
|
| 491 |
+
|
| 492 |
+
The UNCERTAINTY metric for both controls are calculated by:
|
| 493 |
+
|
| 494 |
+
$$
|
| 495 |
+
u _ {i} \text { UNCERTAINTY} = \underset {t \in [ t _ {0}, T ]} {\operatorname {mean}} \left\{\underset {k} {\operatorname {std}} \left\{u _ {i} (t) \right\} _ {k}, k \in \{1, \dots , N \} \right\}, i \in \{1, 2 \}. \tag {25}
|
| 496 |
+
$$
|
| 497 |
+
|
| 498 |
+
All the class $\mathcal{H}$ functions in the BarrierNets/ABNets are implemented as linear functions with trainable slopes.
|
| 499 |
+
|
| 500 |
+
# C.1. Training Stability and Efficiency
|
| 501 |
+
|
| 502 |
+
The dQP (Amos & Kolter, 2017) could give very bad solutions (although it still satisfies the safety constraints), as shown in Fig. 7 (right). This could significantly deteriorate the training quality of the model.
|
| 503 |
+
|
| 504 |
+
# C.2. 2D Robot Obstacle Avoidance
|
| 505 |
+
|
| 506 |
+
Models. All the models include fully connected layers of shape [5, 128, 32, 32, 2] with RELU as activation functions. There are some additional layers of differentiable QPs in other models (other than E2E-related models). The model input is the system state and the goal.
|
| 507 |
+
|
| 508 |
+

|
| 509 |
+
Figure 7: Comparison of ABNet (left) and BarrierNet (right) (based on dQP) in training stability. BarrierNet tends to give very bad solutions. "time" in the x-axis denotes training iterations.
|
| 510 |
+
|
| 511 |
+

|
| 512 |
+
|
| 513 |
+
Training and Dataset. The dataset includes 100 trajectories, and each trajectory has 137 trajectory points. The ground truth controls (i.e., training labels) are obtained via solving HOCBF-based QPs (Xiao & Belta, 2021). We use Adam as the optimizer to train the model with a MSE loss function and a learning rate 0.001. We use the QPFunction from the OptNet (Amos & Kolter, 2017) to solve the dQPs. The training time of the ABNet is about 1 hour for 20 epochs on a RTX-3090 computer.
|
| 514 |
+
|
| 515 |
+
Robot dynamics and safety constraints. We employ the bicycle model as the robot dynamics:
|
| 516 |
+
|
| 517 |
+
$$
|
| 518 |
+
\underbrace {\left[ \begin{array}{l} \dot {x} (t) \\ \dot {y} (t) \\ \dot {\theta} (t) \\ \dot {v} (t) \end{array} \right]} _ {\dot {\boldsymbol {x}} (t)} = \underbrace {\left[ \begin{array}{c} v (t) \cos \theta (t) \\ v (t) \sin \theta (t) \\ 0 \\ 0 \end{array} \right]} _ {f (\boldsymbol {x})} + \underbrace {\left[ \begin{array}{l l} 0 & 0 \\ 0 & 0 \\ 1 & 0 \\ 0 & 1 \end{array} \right]} _ {g (\boldsymbol {x})} \underbrace {\left[ \begin{array}{l} u _ {1} (t) \\ u _ {2} (t) \end{array} \right]} _ {\boldsymbol {u}} \tag {26}
|
| 519 |
+
$$
|
| 520 |
+
|
| 521 |
+
where $(x,y)\in \mathbb{R}^2$ denotes the 2D location of the robot, $\theta \in \mathbb{R}$ is the heading angle of the robot, and $v \in \mathbb{R}$ is the linear speed of the robot. $u_{1},u_{2}$ are the angular speed and acceleration controls, respectively.
|
| 522 |
+
|
| 523 |
+
The safety constraint of the robot is defined as:
|
| 524 |
+
|
| 525 |
+
$$
|
| 526 |
+
b (\boldsymbol {x}) = \left(x - x _ {0}\right) ^ {2} + \left(y - y _ {0}\right) ^ {2} - R ^ {2} \geq 0, \tag {27}
|
| 527 |
+
$$
|
| 528 |
+
|
| 529 |
+
where $(x_0,y_0)\in \mathbb{R}^2$ is the 2D location of the obstacle, and $R > 0$ is its size.
|
| 530 |
+
|
| 531 |
+
Acceleration control profiles. We show the acceleration control profiles in Fig. 8. The corresponding uncertainty is also significantly decreased with the proposed ABNet.
|
| 532 |
+
|
| 533 |
+
# C.3. Safe Robot Manipulation
|
| 534 |
+
|
| 535 |
+
Models. All the models include fully connected layers of shape [6, 128, 256, 128, 128, 32, 32, 2] with RELU as activation functions. There are some additional layers of differentiable QPs in other models (other than E2E-related models). The model input is the system state and the goal.
|
| 536 |
+
|
| 537 |
+
Training and Dataset. The dataset includes 1000 trajectories, and each trajectory has about 350 trajectory points. The ground truth controls (i.e., training labels) are obtained via solving HOCBF-based QPs (Xiao & Belta, 2021). We use Adam as the optimizer to train the model with a MSE loss function and a learning rate 0.001. We use the QPFunction from the OptNet (Amos & Kolter, 2017) to solve the dQPs. The training time of the ABNet is about 2 hours for 10 epochs on a RTX-3090 computer.
|
| 538 |
+
|
| 539 |
+

|
| 540 |
+
Figure 8: 2D robot obstacle avoidance acceleration control profiles and their distributions. The controls are subject to input noise, and thus are non-smooth. All the testings are done in a closed-loop fashion, i.e., the model outputs are directly used to control the robot.
|
| 541 |
+
|
| 542 |
+
Robot dynamics and safety constraints. We employ the following model as the manipulator dynamics:
|
| 543 |
+
|
| 544 |
+
$$
|
| 545 |
+
\underbrace {\left[ \begin{array}{l} \dot {\theta} _ {1} \\ \dot {\omega} _ {1} \\ \dot {\theta} _ {2} \\ \dot {\omega} _ {2} \end{array} \right]} _ {\dot {\boldsymbol {x}}} = \underbrace {\left[ \begin{array}{c} \omega_ {1} \\ 0 \\ \omega_ {2} \\ 0 \end{array} \right]} _ {f (\boldsymbol {x})} + \underbrace {\left[ \begin{array}{l l} 0 & 0 \\ 1 & 0 \\ 0 & 0 \\ 0 & 1 \end{array} \right]} _ {g (\boldsymbol {x})} \underbrace {\left[ \begin{array}{l} u _ {1} \\ u _ {2} \end{array} \right]} _ {\boldsymbol {u}} \tag {28}
|
| 546 |
+
$$
|
| 547 |
+
|
| 548 |
+
where $(\theta_{1},\theta_{2})\in \mathbb{R}^{2}$ denotes the angles of the two-link manipulator joints (defined in the Cartesian space; we may get the joint space angles as $q_{1} = \theta_{1}, q_{2} = \theta_{2} - \theta_{1}$ ), $(\omega_{1},\omega_{2})\in \mathbb{R}^{2}$ are the angular speeds of the two-link manipulator joints, and $u_{1},u_{2}$ are the angular acceleration controls corresponding to the two joints, respectively.
|
| 549 |
+
|
| 550 |
+
The safety constraint of the robot is defined as:
|
| 551 |
+
|
| 552 |
+
$$
|
| 553 |
+
b (\boldsymbol {x}) = \left(l _ {1} \cos \theta_ {1} + l _ {2} \cos \theta_ {2} - x _ {0}\right) ^ {2} + \left(l _ {1} \sin \theta_ {1} + l _ {2} \sin \theta_ {2} - y _ {0}\right) ^ {2} - R ^ {2} \geq 0, \tag {29}
|
| 554 |
+
$$
|
| 555 |
+
|
| 556 |
+
where $(x_0, y_0) \in \mathbb{R}^2$ is the location of the obstacle, and $R > 0$ is its size. $l_1 > 0, l_2 > 0$ are the lengths of the two links of the manipulator, respectively. In the current setting, the non-collision of the end-effector implies the non-collision of the links. Therefore, we only need to consider the safety of the end-effector. We show both the $u_1, u_2$ control profiles in Fig. 9 to demonstrate the advantage of the proposed ABNet. The metric definitions are the same as in the 2D robot obstacle avoidance, and the number of testing runs is $N = 100$ .
|
| 557 |
+
|
| 558 |
+
# C.4. Vision-based End-to-End Autonomous Driving
|
| 559 |
+
|
| 560 |
+
Models. All the models include CNN ([[3, 24, 5, 2, 2], [24, 36, 5, 2, 2], [36, 48, 3, 2, 1], [48, 64, 3, 1, 1], [64, 64, 3, 1, 1]]) and LSTM layers (size: 64) and some fully connected layers of shape $[32, 32, 2] \times 2$ with RELU as activation functions. The dropout rates for both CNN and fully connected layers are 0.3. There are some additional layers of differentiable QPs in other models (other than E2E-related models). The model input is the front-view RGB images (shape: $3 \times 45 \times 155$ ) of the ego vehicle, and the outputs are the steering rate and acceleration controls of the vehicle.
|
| 561 |
+
|
| 562 |
+
Training and Dataset. The dataset is open-sourced including 0.4 million image-control pairs from a closed-road sim-to-real driving field. Static and parked cars of different types and colors are used as obstacles in the dataset. The dataset is collected from the VISTA simulator (Amini et al., 2022). The ground truth controls (i.e., training labels) are obtained via solving a nonlinear model predictive control (NMPC). We use Adam as the optimizer to train the model with a MSE loss function and a learning rate 0.001. We use the QPFunction from the OptNet (Amos & Kolter, 2017) to solve the dQPs. The training time of the ABNet is about 15 hours for 5 epochs on a RTX-3090 computer.
|
| 563 |
+
|
| 564 |
+

|
| 565 |
+
Figure 9: Robot manipulation joint control profiles and their distributions. The controls are subject to input noise, and thus are non-smooth. All the testings are done in a closed-loop fashion, i.e., the model outputs are directly used to control the manipulator.
|
| 566 |
+
|
| 567 |
+

|
| 568 |
+
|
| 569 |
+
Brief introduction to VISTA. VISTA is a sim-to-real driving simulator that can generate driving scenarios from real driving data (Amini et al., 2022). VISTA allows us to train our model with guided policy learning. This learning method has been shown to work for model transfer to a full-scale real autonomous vehicle. There are three steps to generate the data: (i) In VISTA, we randomly initialize the locations and poses of ego- and ado-cars that are associated with the real driving data; (ii) we use NMPC to collect ground-truth controls (training labels) with corresponding states, and (iii) we collect front-view RGB images along the trajectories generated from NMPC.
|
| 570 |
+
|
| 571 |
+
Vehicle dynamics and safety constraints. The vehicle dynamics are specified with respect to a reference trajectory (Rucco et al., 2015), such as the lane center line. The two most important states are the along-trajectory progress $s \in \mathbb{R}$ and the lateral offset distance $d \in \mathbb{R}$ of the vehicle center with respect to the trajectory. The dynamics are defined as:
|
| 572 |
+
|
| 573 |
+
$$
|
| 574 |
+
\underbrace {\left[ \begin{array}{l} \dot {s} \\ \dot {d} \\ \dot {\mu} \\ \dot {v} \\ \dot {\delta} \end{array} \right]} _ {\dot {x}} = \underbrace {\left[ \begin{array}{c} \frac {v \cos (\mu + \beta)}{1 - d \kappa} \\ v \sin (\mu + \beta) \\ \frac {v}{l _ {r}} \sin \beta - \kappa \frac {v \cos (\mu + \beta)}{1 - d \kappa} \\ 0 \\ 0 \end{array} \right]} _ {f (\boldsymbol {x})} + \underbrace {\left[ \begin{array}{l l} 0 & 0 \\ 0 & 0 \\ 0 & 0 \\ 1 & 0 \\ 0 & 1 \end{array} \right]} _ {g (\boldsymbol {x})} \underbrace {\left[ \begin{array}{l} u _ {1} \\ u _ {2} \end{array} \right]} _ {\boldsymbol {u}}, \tag {30}
|
| 575 |
+
$$
|
| 576 |
+
|
| 577 |
+
where $\mu$ is the local heading error of the vehicle with respect to the reference trajectory, $v$ is the linear speed of the vehicle, and $\kappa$ is the curvature of the trajectory at the progress $s$ . $l_{r}$ is the length of the vehicle from the tail to the center, $\beta = \arctan \left(\frac{l_r}{l_r + l_f}\tan \delta\right)$ , where $l_{f}$ is the length of the vehicle from the head to the center. $u_{1}, u_{2}$ are the steering rate and acceleration controls of the vehicle, respectively.
|
| 578 |
+
|
| 579 |
+
The safety constraint of the vehicle is defined as:
|
| 580 |
+
|
| 581 |
+
$$
|
| 582 |
+
b (\boldsymbol {x}) = (s - s _ {0}) ^ {2} + (d - d _ {0}) ^ {2} - R ^ {2} \geq 0, \tag {31}
|
| 583 |
+
$$
|
| 584 |
+
|
| 585 |
+
where $(s_0, d_0) \in \mathbb{R}^2$ is the location of the obstacle in the curvi-linear frame (i.e., defined with respect to the reference trajectory), and $R > 0$ defines its size that is chosen such that the satisfaction of the above constraint can make the ego vehicle avoid crashing onto the obstacle.
|
| 586 |
+
|
| 587 |
+
Closed-loop testing. We test all of our models in a closed-loop manner in VISTA. In other words, at each time step, we get the front-view RGB image observation from VISTA. Then, the model generates a control based on the image. Finally, the control is used to drive the "virtual" vehicle in VISTA. This process is done recursively until the final time. The total number of testing runs is $N = 100$ for all the tables. The obstacles are randomly initialized (in uniform probability distribution) with lateral distance $d_0$ ranges from $\pm 0.1m$ to $\pm 1.5m$ . In Figs. 6 and 11, the ego vehicle is randomly initialized with $d \in [-0.5, 0.5]m$ (in uniform probability distribution).
|
| 588 |
+
|
| 589 |
+

|
| 590 |
+
|
| 591 |
+

|
| 592 |
+
|
| 593 |
+

|
| 594 |
+
|
| 595 |
+

|
| 596 |
+
|
| 597 |
+

|
| 598 |
+
Figure 10: Attention-based image observations for the ABNet-att model. From left to right and top to down: attentions on full image, left-most part, left lane boundary, lane center, right lane boundary, and right-most part.
|
| 599 |
+
|
| 600 |
+

|
| 601 |
+
|
| 602 |
+
Image observations for the ABNet-att model. We generate the attention-based observations as shown in Fig. 10. Each of the attention images may play an important role in a specific driving scenario (e.g., attention on the left-most part may be crucial for sharp-left turn).
|
| 603 |
+
|
| 604 |
+
Acceleration control profiles. We present both the acceleration control and steering rate control profiles in Fig. 11. Both the BNet and BNet-UP models have forced the ego vehicle to have a large deceleration instead of making it to pass the obstacle using the steering control when the vehicle approaches the obstacle. This can make the ego vehicle get stuck at the obstacles, and thus, the obstacle passing rate (as shown in Table 3) is low in these two models.
|
| 605 |
+
|
| 606 |
+
Ablation studies on the model robustness in terms of safety under noisy input. To further test the model safety robustness, we add random noise (50% magnitude of the image values) to all the image observations. The results are presented in Table 4. Our proposed ABNets can still guarantee the safety of the vehicle under noisy input (0% crash rate), while the crash rates using other models significantly increase except the DFB model. This is because the HOCBFs in the DFB model are not trainable, and the corresponding parameters are fixed. Badly trained HOCBFs could make the method fail to guarantee safety due to the inter-sampling effect.
|
| 607 |
+
|
| 608 |
+

|
| 609 |
+
Figure 11: Vision-based end-to-end autonomous driving closed-loop testing control profiles. The models directly take images as inputs, and output controls for the vehicle. All the testings are done in closed-loop in VISTA.
|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
|
| 613 |
+
Table 4: Ablation study: vision-based end-to-end autonomous driving closed-loop testing under noise and comparisons with benchmarks. Items in the first row are short for obstacle crash rate (CRASH), obstacle passing rate (PASS), satisfaction of safety constraints where non-negative values mean safety guarantees (SAFETY), system conservativeness (CONSER.), acceleration control $u_{1}$ uncertainty ( $u_{1}$ UNCERTAINTY), steering rate control $u_{2}$ uncertainty ( $u_{2}$ UNCERTAINTY), and theoretical safety guarantees (THEORET. GUAR.) respectively. In the model column, items are short for single vanilla end-to-end driving model (V-E2E), E2Es merged with Monte-Carlo Dropout (E2Es-MCD), E2Es merged with deep ensembles (E2Es-DR), deep forward and backward model (DFB), single BarrierNet (BNET), BarrierNet policies with uncertainty propagation (BNET-UP), ABNet with 10 heads (ABNET), ABNet with attention images and 10 heads (ABNET-ATT), ABNET-SC denotes our ABNet first trained with ABNET-ATT scaled by ABNET (20 heads) respectively. The safety metric is defined as the minimum value of the safety specification $b_{j}(\boldsymbol{x})$ , $j \in S$ among all runs. The conservativeness metric is defined as the mean (with std) of the minimum value (in each run) of the safety specification $b_{j}(\boldsymbol{x})$ , $j \in S$ among all runs. The uncertainty metrics for both $u_{1}$ and $u_{2}$ are measured by the standard deviations of the model outputs (two controls) among all runs.
|
| 614 |
+
|
| 615 |
+
<table><tr><td>MODEL</td><td>CRASH(↓)</td><td>PASS(↑)</td><td>SAFETY(≥0)</td><td>CONSER.(≥0&↓)</td><td>u1UNCERTAINTY(↓)</td><td>u2UNCERTAINTY(↓)</td><td>THEORET.GUAR.</td></tr><tr><td>V-E2E (AMINI ET AL., 2022)</td><td>31%</td><td>69%</td><td>-59.455</td><td>-8.932±19.741</td><td>0.529</td><td>0.239</td><td>×</td></tr><tr><td>E2ES-MCD (GAL &GHAHRAMANI, 2016)</td><td>28%</td><td>72%</td><td>-58.405</td><td>-8.116±20.802</td><td>0.524</td><td>0.232</td><td>×</td></tr><tr><td>E2ES-DR (LAKSHMINARAYANAT EL., 2017)</td><td>27%</td><td>73%</td><td>-60.267</td><td>-8.781±20.910</td><td>0.512</td><td>0.225</td><td>×</td></tr><tr><td>DFB (PEREIRA ET AL., 2020)</td><td>1%</td><td>37%</td><td>-13.281</td><td>-0.256±4.348</td><td>0.482</td><td>0.127</td><td>√</td></tr><tr><td>BNET (XIAO ET AL., 2023)</td><td>23%</td><td>37%</td><td>-45.415</td><td>-9.114±13.382</td><td>0.730</td><td>0.316</td><td>√</td></tr><tr><td>BNET-UP (WANG ET AL., 2023B)</td><td>24%</td><td>39%</td><td>-44.634</td><td>-8.866±13.167</td><td>0.747</td><td>0.278</td><td>×</td></tr><tr><td>ABNET (OURS)</td><td>0%</td><td>100%</td><td>4.268</td><td>8.315±2.147</td><td>0.151</td><td>0.326</td><td>√</td></tr><tr><td>ABNET-ATT (OURS)</td><td>0%</td><td>100%</td><td>5.986</td><td>7.032±0.405</td><td>0.118</td><td>0.213</td><td>√</td></tr><tr><td>ABNET-SC (OURS)</td><td>0%</td><td>100%</td><td>4.118</td><td>7.515±1.120</td><td>0.128</td><td>0.255</td><td>√</td></tr></table>
|
abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ea9d4785c9158ef0f5773a4a62c0269abea7ace0d3f5889da1b29859d5230839
|
| 3 |
+
size 1201298
|
abnetadaptiveexplicitbarriernetforsafeandscalablerobotlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7ee459a7ec7426bef79f82d2baaaa2532df989c034d1aac73a51cd677bb07bfa
|
| 3 |
+
size 681679
|
accelerateddiffusionmodelsviaspeculativesampling/69ab461c-709a-4477-81b9-473befc6be24_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:48940f76beea29f20a7fcfce331fc8bb92453bc77bcbf461f12c205b12442be0
|
| 3 |
+
size 312072
|
accelerateddiffusionmodelsviaspeculativesampling/69ab461c-709a-4477-81b9-473befc6be24_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dec2cd4491b9a809cbc043d213bfffd82b0cfb5461f250356510afa7b047f94b
|
| 3 |
+
size 363599
|
accelerateddiffusionmodelsviaspeculativesampling/69ab461c-709a-4477-81b9-473befc6be24_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9f3b6f8d6f0299df0c79fd0f1c4857d9f87ef5e8c42f874dde3cb6dd6b1f9961
|
| 3 |
+
size 2938091
|
accelerateddiffusionmodelsviaspeculativesampling/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
accelerateddiffusionmodelsviaspeculativesampling/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e89507a4ed0198e604ce7b0f04ec11bccc008e3069e3dd6492cc8a6c557a473d
|
| 3 |
+
size 2641573
|
accelerateddiffusionmodelsviaspeculativesampling/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:225693367c3da58a725a3549fed602a4dce2097f41d4b753247e2f3d925bd428
|
| 3 |
+
size 1928463
|
acceleratinglargelanguagemodelreasoningviaspeculativesearch/421c4b62-cf3e-458b-b763-7a580b21e488_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a8c0b08d28329a708f93bd1efc0b25be10605e17ce59acf5e39b577a15530e0d
|
| 3 |
+
size 211567
|
acceleratinglargelanguagemodelreasoningviaspeculativesearch/421c4b62-cf3e-458b-b763-7a580b21e488_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:542d10a4bc2f6721874d682619f96ea306e51ec98746095d27a3c55d07fb8db8
|
| 3 |
+
size 251730
|
acceleratinglargelanguagemodelreasoningviaspeculativesearch/421c4b62-cf3e-458b-b763-7a580b21e488_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:90661bb325709885baef428502077673085647748fe79d35ffa23e6207286b9e
|
| 3 |
+
size 795250
|
acceleratinglargelanguagemodelreasoningviaspeculativesearch/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
acceleratinglargelanguagemodelreasoningviaspeculativesearch/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2847a510a56463b51f3c84eb2789fd9593984d34e65772ee4bd77013f818eff7
|
| 3 |
+
size 1251975
|
acceleratinglargelanguagemodelreasoningviaspeculativesearch/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6090cecf7a41ed2cc99257c870c947ee196849d5551be82f5f0beefe460bf542
|
| 3 |
+
size 1093460
|
acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/6e68aa2b-d20f-464e-8322-2e96f58dc240_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:291823dc6413aea3da059a2a6376edbd49c978495a857f24b75b7528c71889a9
|
| 3 |
+
size 171572
|
acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/6e68aa2b-d20f-464e-8322-2e96f58dc240_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4d86b0d197c48507f25e70150a88db07b7276c5e22b27548c5a5536d38d36c05
|
| 3 |
+
size 200610
|
acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/6e68aa2b-d20f-464e-8322-2e96f58dc240_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:49a63dafaad60cdfba3544ba3e49f53061e2f8605c2d90d293ef6d9b3b26b5b3
|
| 3 |
+
size 681948
|
acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:803a273d43458ba964ec54b93db99dd2fff84a3520b3904e0902caa157f7ca3e
|
| 3 |
+
size 1407866
|
acceleratingllminferencewithlosslessspeculativedecodingalgorithmsforheterogeneousvocabularies/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ae1a9f395ee6be11493e5589aac56f8dc712b25aa46e435bacfac608d096d27e
|
| 3 |
+
size 897399
|
addqadaptivedistributionaldoubleqlearning/42431bfd-4f0e-441e-9c8a-947af21cd543_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:64206ab6f0c89252fcabd8889f45b71c9edf81c8946c1bb0c688426410456a7b
|
| 3 |
+
size 317713
|
addqadaptivedistributionaldoubleqlearning/42431bfd-4f0e-441e-9c8a-947af21cd543_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aa42aff04887ae66409f10bbc7c9b4270fb02f0e4e52694fb6ecd11f13cfa070
|
| 3 |
+
size 360933
|
addqadaptivedistributionaldoubleqlearning/42431bfd-4f0e-441e-9c8a-947af21cd543_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:84a37ae895e4b76becbc1b0c6bac8aaf3ed2db17a33864598fcc1ae178c19ec8
|
| 3 |
+
size 12691043
|
addqadaptivedistributionaldoubleqlearning/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
addqadaptivedistributionaldoubleqlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a1a497f078be36b96a3ba39d22717eadff2ca8d6c296c088454b779285d88ab6
|
| 3 |
+
size 4174943
|
addqadaptivedistributionaldoubleqlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:44a110ec45be95f0f437302705ed461d6fe2acf238fc53ed7510356870f76e9f
|
| 3 |
+
size 2084351
|
adhmraligningdiffusionbasedhumanmeshrecoveryviadirectpreferenceoptimization/a763c3ea-e1d0-42c6-96b4-e51045315b61_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:368bd51e4890c9965713daa0dd40fcf35cf6bfb8ff0e4c98fba1c931fcc38cc7
|
| 3 |
+
size 86203
|
adhmraligningdiffusionbasedhumanmeshrecoveryviadirectpreferenceoptimization/a763c3ea-e1d0-42c6-96b4-e51045315b61_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3a1093328b04cf829a3d1064ecba055fd7031aec88c13ab8251659f4abde3143
|
| 3 |
+
size 106654
|