diff --git a/.gitattributes b/.gitattributes index 5bac703b1e24592ac1e72e0b196f37d2cc86d1cc..122c9a4d8d003253e73fe3075e1b92affa42025b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1164,3 +1164,11 @@ data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_origin.pdf data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_content_list.json b/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..998d8533e7e9007457c006a865486e961b872d17 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_content_list.json @@ -0,0 +1,2647 @@ +[ + 
{ + "type": "text", + "text": "MultiLoKo: a multilingual local knowledge benchmark for LLMs spanning 31 languages", + "text_level": 1, + "bbox": [ + 227, + 122, + 769, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dieuwke Hupkes* Nikolay Bogoychev*", + "bbox": [ + 357, + 224, + 637, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Meta", + "bbox": [ + 480, + 241, + 517, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{dieuwkehupkes,nbogoych}@meta.com", + "bbox": [ + 364, + 253, + 633, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 304, + 537, + 319 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present MultiLoKo, a new benchmark for evaluating multilinguality in LLMs covering 31 languages. MultiLoKo consists of three partitions: a main partition consisting of 500 questions per language, separately sourced to be locally relevant to the specific language, and two translated partitions, containing human-authored translations from 30 non-English languages to English and vice versa. For comparison, we also release corresponding machine-authored translations. The data is equally distributed over two splits: a dev split and a blind, out-of-distribution test split. MultiLoKo can be used to study a variety of questions regarding the multilinguality of LLMs as well as meta-questions about multilingual benchmark creation. We compute MultiLoKo scores for 11 base and chat models marketed to be multilingual and study their average performance, their performance parity across languages, how much their ability to answer questions depends on the question language, and which languages are most difficult. None of the models we studied performs well on MultiLoKo, as indicated by low average scores as well as large differences between the best and worst scoring languages. 
Furthermore, we find a substantial effect of the question language, indicating suboptimal knowledge transfer between languages. Lastly, we find that using local vs English-translated data can result in differences more than 20 points for the best performing models, drastically change the estimated difficulty of some languages. For using machines instead of human translations, we find a weaker effect on ordering of language difficulty, a larger difference in model rankings, and a substantial drop in estimated performance for all models.", + "bbox": [ + 228, + 334, + 767, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 662, + 310, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the growing presence and deployment of LLMs across the world, evaluating their abilities in languages other than English becomes more and more eminent. Yet, studying and evaluating multilinguality in LLMs remains a challenging enterprise, and it is hardly exaggerated to call the current state of multilingual evaluation in LLMs insufficient. Older multilingual benchmarks such as PAWS-X (Zhang et al., 2019), XNLI (Conneau et al., 2018) or XCOPA (Ponti et al., 2020) often do not fit the demands for evaluating auto-regressive models and are rarely used to evaluate recent models. Furthermore, their coverage of languages is relatively small compared to the number of languages in which LLMs are intended to be proficient.", + "bbox": [ + 169, + 691, + 823, + 805 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "More often used for LLM evaluation are benchmarks translated from English, such as MGSM (translated GSM8K, Shi et al., 2023), MMMLU (tranlated MMLU, OpenAI, 2025) or (less frequently) Belebele (Bandarkar et al., 2024). These benchmarks provide good coverage over many languages, but using translated data comes with its own set of issues. 
One such issue is that even when human-rather than machine-authored translations are used, translated data is known to differ from native text in several ways (Clark et al., 2020). Furthermore, using translated benchmarks imposes a strong", + "bbox": [ + 169, + 806, + 826, + 891 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10356v2 [cs.CL] 15 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 707 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contributions", + "bbox": [ + 191, + 898, + 318, + 912 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "English-centric bias: translated data may be multilingual on the surface, it is not in its content. The benchmarks MLQA (Lewis et al., 2020) and TidyQA (Clark et al., 2020) to some extent address the issue by sourcing data separately for different languages. Even in their sourcing protocols, however, there is no explicit focus on selecting locally relevant content for the chosen languages. In addition to that, their coverage is again small compared to the above mentioned translated benchmarks.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In response to these issues, we introduce a wide-coverage multilingual benchmark with locally-sourced questions for 31 different languages. Because the benchmark targets multilingual local knowledge, we dub it MultiLoKo. The release of MultiLoKo serves two interconnected goals:", + "bbox": [ + 169, + 162, + 826, + 205 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Provide a better means to evaluate multilinguality in LLMs;", + "2) Provide data to study the effect of various design choices in multilingual evaluation." 
+ ], + "bbox": [ + 207, + 208, + 781, + 238 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address our first goal, we create 500 questions per language, written from scratch for each language, using a sourcing protocol specifically designed to ensure local relevance of the question topics. To also reap the benefits of parallel data, we commissioned both human and machine-authored translations for all non-English questions into English and vice versa, providing a total of 15500 parallel questions, sourced across the 31 languages in the benchmark. The translated data facilitates the study of transfer between languages and also serves our second goal. By comparing the English-translated data with the locally sourced data, we can explicitly compare the adequacy of using translated benchmarks; by comparing human- with machine-authored translations, we can better estimate the potential issues of the latter. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while test scores can only be obtained through an external provider.3", + "bbox": [ + 169, + 241, + 826, + 380 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We provide elaborate analyses for both our goals. 
We compute average performance and language parity scores on the locally sourced data for 11 models marketed for their multilinguality (§ 5.1); we investigate whether these models exhibit knowledge transfer between different languages (§ 5.2); we study the impact of local sourcing versus translating on model rankings and language difficulty (§ 5.4.1); we analyse the difficulty of the included languages through various lenses (§ 5.3); and we conduct an analysis into the difference between human- and machine-authored translation (§ 5.4.3).", + "bbox": [ + 169, + 383, + 823, + 465 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language. Llama 3.1 405B and GPT4-o are close contenders in terms of average scores (34.3 and 34.0, respectively), but both have substantially higher language gaps (39 and 49 points). Almost across the board, model performances are better when questions are asked in the language to which the content is relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low response-consistency across question language.", + "bbox": [ + 169, + 468, + 823, + 566 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Next, we study the relevance of using locally sourced data as opposed to translated English data as well as whether it matters if translations are authored by humans or machines. We find that the estimated difficulty of some languages changes drastically across the two sourcing setups, within the range of 15 points decrease and 8 points increase on average across models. The rank correlation between average language difficulty score is 0.78. Furthermore, individual model scores between locally and English-translated data can differ up to 22 points for some languages. 
However, changing the sourcing setup does not impact model rankings, suggesting that using translated data may be suitable for comparing models but less for model development or language prioritisation. For using machine- instead of human-authored translations, as well, the effect on model ranking is limited $(R = 0.97)$ , but the difficulty estimates of various languages changes with up to 12 points. Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to $34\\%$ of the human-translated scores.", + "bbox": [ + 169, + 568, + 826, + 734 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Outline In the remainder of this paper, we first describe our dataset collection protocol and the dataset itself in § 2 and § 3, respectively. In § 4, we describe our experimental setup. In § 5, we present a range of different results, covering (among other things), the summary of results described above. We conclude in § 6. As we discussed quite some related work above, we do not include a separate related work section in the main paper, but we do provide a discussion of a wider range of multilingual datasets in Appendix E.", + "bbox": [ + 169, + 741, + 826, + 825 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "${}^{2}$ An exception to this is the benchmark EXAMS (Hardalov et al., 2020),which consists of exams separately sourced for each language. 
For reasons unknown to the authors of this work, it was never used for any prominent LLM release, with the exception of (Dubey et al., 2024), who deployed it for training rather than evaluation.", + "bbox": [ + 169, + 832, + 823, + 872 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "3The MultiLoKo data, five few-shot examples per language, an evaluation script, a set of language-specific prompts, and information about test-score submissions can be found at https://github.com/facebookresearch/multiloko/.", + "bbox": [ + 169, + 873, + 823, + 910 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Dataset collection", + "text_level": 1, + "bbox": [ + 171, + 89, + 354, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The main data collection protocol of MultiLoKo is similar to the protocol used by the well-known benchmark SQuAD (Rajpurkar et al., 2016): we source articles from Wikipedia and ask annotators to generate questions about paragraphs sampled from these articles. After that, we run several rounds of quality control on the generated questions and commission human- and machine-authored translations of all data. Our collection protocol consists of five steps.", + "bbox": [ + 169, + 121, + 823, + 191 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 1: Paragraph selection The first step in our protocol is the sampling of the 6K most visited Wikipedia pages for each language for the period of 2016-2021. We sample paragraphs from those pages by randomly selecting a word in the page and expanding left and right until we reach 3K characters. Next, we ask annotators to judge the local relevance of the samples on a scale from 1 to 5, where 1 refers to topics specific to the language (e.g. a Swedish singer not known outside of Sweden) and 5 to globally well-known topics (e.g. 'Youtube'). 
We disregard all topics that have a locality score above 3. The full rubric and annotation instructions can be found in Appendix D.1.", + "bbox": [ + 169, + 199, + 826, + 297 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 2: Question generation In step 2, we ask native speakers to generate challenging questions about the content in the paragraphs. To facilitate automatic scoring, we ask that the questions are closed-form questions, with only one correct short answer. To ensure that the annotation instructions are understandable and appropriate for each locale and the questions of high quality, we run a pilot with 50 questions separately for each language. After our pilot, we commission 500 additional samples for each language, to leave a $10\\%$ margin to disregard questions in the rest of the process.", + "bbox": [ + 169, + 306, + 823, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 3: Question review For each generated question, we ask a new set of annotators from a separate provider to judge whether the generated questions abide by the annotation instructions, to flag any possible issues, and to mark if the question is useable as is, would be useable with a small adaptation or should be disregarded. We ask annotators to fix small annotators on the spot, and as respective vendors that questions with larger issues are replaced.", + "bbox": [ + 169, + 398, + 823, + 469 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 4: Question answering As a last quality control step, we ask two annotators different from the creator of the question to answer the questions. In this stage, we do not ask annotators to correct questions, but we simply disregard all questions for which either annotator thinks the original answer was incorrect, or the annotator provided an answer not matching the original answer because of ambiguities in the question. 
The only corrections we allow in this stage are additions of additional, semantically equivalent, correct answers (e.g. 'four' as an alternative to '4').", + "bbox": [ + 169, + 477, + 826, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 5: Translation Lastly, we translate the non-English data back to English and vice versa. This effort serves two purposes. First, it allows to study generalisation of knowledge and skills between English and non-English languages through a direct comparison of the same questions. Second, it facilitates inspection of the topics and questions for all languages of the dataset, without the need to be able to speak all those languages. As automatic translation of benchmarks is relatively common practice in the field (e.g. Li et al., 2024), we commission both human and machine translations and study their difference as part of our analysis. For the machine translations, we use Google Translate sentence based cloud API. $^{4}$", + "bbox": [ + 169, + 569, + 826, + 680 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 MultiLoKo the dataset", + "text_level": 1, + "bbox": [ + 171, + 700, + 397, + 717 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "MultiLoKo consists of three main components: i) the collected data; ii) a set of multilingual prompts to prompt base- and chat models; and iii) a set of metrics.", + "bbox": [ + 169, + 733, + 823, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 The collected data", + "text_level": 1, + "bbox": [ + 171, + 779, + 341, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The data in MultiLoKo consists of several partitions and two splits.", + "bbox": [ + 169, + 805, + 616, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Partitions MultiLoKo contains one main partition, containing locally-soured data for 31 languages, including English. In addition to that, it contains four translated partitions. 
Two of those are human-translated partitions: human-translated-from-english, consisting of human-authored translations of English data into the 30 other languages in MultiLoKo,", + "bbox": [ + 169, + 828, + 826, + 885 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "4https://cloud.google.com/translate?hl $\\equiv$ en", + "bbox": [ + 191, + 896, + 496, + 911 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "human-translated-to-english containing human-authored translations of the non-English subsets into English. The other two are machine-translated partitions following the same pattern: machine-translated-from- english, contains machine-authored translations of English data into 30 other languages, and machine-translated-to- english contains machine-authored translations of the non-English subsets into English. All partitions contain 500 samples per language - thus in total 15500 samples in the main partition, and 15000 samples in the translated partitions. Statistics about the dataset such as the distribution over answer types and the average prompt length can be found in Appendix A. Results relating to the difficulty of the benchmark can be found in $\\S 5$ .", + "bbox": [ + 169, + 90, + 826, + 203 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Splits Each partition is divided equally over two splits: a dev split that can be used for development, and a blind test split. Each of these splits thus contains 250 samples per language. Until the test split is publicly released, results can only be obtained through model submissions. The splits are not random, but constructed such that for each language the most frequently visited pages are in the dev split while the least frequently visited pages are in the test split, roughly preserving the distribution of answer types (e.g. number, name, year, etc). 
The test split can thus be seen as an out-of-distribution (ood) split, specifically meant to assess generalisation (which is challenging in the context of LLMs, see e.g. Hupkes et al., 2023). In § 5.4.2 we provide an analysis of the extent to which the split is truly an ood split, by analysing its difficulty. The results reported in the results section of the paper are dev results.", + "bbox": [ + 169, + 210, + 826, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Prompts and few-shot examples", + "text_level": 1, + "bbox": [ + 171, + 367, + 434, + 382 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Running MultiLoKo requires prompts. In the spirit of getting truly multilingually appropriate results, we design prompts separately for each language and release them along with the data. The prompts are written by different linguistic experts for the various languages, in consultation with the benchmark creators to ensure they are appropriate for LLMs. We provide prompts for base models and chat models that allow for incorporating up to five few-shot examples, which we also provide.6 All prompts and few-shot examples can be found in the MultiLoKo repository.", + "bbox": [ + 169, + 392, + 823, + 476 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Metrics", + "text_level": 1, + "bbox": [ + 171, + 493, + 266, + 507 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MultiLoKo has two main metrics and two auxiliary metrics. The two main metrics - Exact Match accuracy (EM) and Gap - capture the overall performance of MultiLoKo and are computed on the main partition, whereas the two auxiliary metrics - Mother Tongue Effect (MTE) and Locality Effect (LE) - combine information from different partitions. 
We provide a cheat-sheet in Table 1.", + "bbox": [ + 169, + 518, + 823, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "EM and Gap EM indicates the performance of a model on a single language or averaged across languages, as measured by the percentage of times the model (after post-processing) provides an answer that verbatim matches one of the answers in the reference list. Gap, defined as the difference between the best and the worst performing language in the benchmark, is a measure of parity across the individual languages within the benchmark. Taken together, EM and Gap provide a good indication of how well a model is faring on MultiLoKo. Because both gap and EM are binary metrics that may be open to false negatives, we also considered the partial match metrics BLEU (Papineni et al., 2002), ChrF (Popovic, 2015) and contains. We did not find any novel patterns using those metrics, but include them in our implementation for future research.", + "bbox": [ + 169, + 583, + 823, + 708 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MTE Because of the 2x2 design of MultiLoKo, in which we translated non-English data back to English and vice versa, we can compute several metrics related to locality of the requested information. MTE is one of such metrics. It expresses the impact of asking a question in a language to which that question is relevant. We quantify MTE (for non-English languages only), as the difference between the EM score of the locally sourced data asked in the corresponding language (e.g. asking a question about a local Bengali radio station in Bengali) and the EM score when the same questions are asked in English. A positive MTE indicates that information is more readily available when it is relevant to the language in which it was asked, whereas a negative MTE indicates that the information is more easily accessible in English. 
MTE is a measure related to transfer as well as language proficiency.", + "bbox": [ + 169, + 715, + 826, + 843 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5More details can be found at https://github.com/facebookresearch/multiloko/.", + "bbox": [ + 189, + 845, + 712, + 859 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "6In several recent works it has been shown that prompts can have a substantial impact on model scores (e.g. Weber et al., 2023; Mizrahi et al., 2024). Given the large number of languages in the benchmark and the fact that those are not all mastered by the main authors, we did not include a systematic search through prompts, but presented our best-effort results.", + "bbox": [ + 169, + 859, + 826, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/8d54153ccb122aaea2874537a950dad799895f33acc86ca682873fd95442f949.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Average EMThe first main metric we use to quantify performance for MultiLoKo is the average Exact Match score across languages, which expresses how many of the answers match one of the gold standard answers verbatim (after post-processing the answers).
GapThe second main metric is the gap between a model's best and worst performing language. We gap to quantify the extent to which a model has achieved parity across languages. Because a small gap can be achieved both through parity on high scores as parity on low scores, it is most informative in combination with average benchmark performance.
Mother tongue effect (MTE)MTE expresses the impact of asking questions in a language in which the requested information is locally salient, compared to asking it in English. A positive MTE indicates information is more readily available in the language it was (likely) present in the training data, whereas a negative mother tongue effect indicates the information is more easily accessible in English.
Locality effect (LE)LE quantifies the effect of using locally sourced vs translated data. It is measured by computing the difference between scores for locally sourced data and translated English-sourced data. A positive LE implies that using translated English data underestimates performance on a language, a negative LE that using translated English data overestimates performance.
", + "bbox": [ + 176, + 88, + 823, + 296 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: MultiLoKo metric cheatsheet. We use several metrics to quantify model performance using MultiLoKo. This table provides a cheatsheet for their meaning.", + "bbox": [ + 169, + 301, + 823, + 333 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "LE The locality effect (LE) is a measure of how much performance on knowledge tasks is over- or underestimated through the use of using translated English data, as opposed to locally relevant data. We quantify the locality effect as the difference in EM for English translated data and locally sourced data. If for a language the English translated data has as a higher EM, the LE is positive, indicating that using English translated data likely overestimating a model's ability on providing knowledge for that language. If the LE is negative the English translated data may provide an underestimation of the score for that language. Note that because we often observe both positive and negative LEs for the 30 non-English languages in MultiLoKo, the average LE across languages may be small, even if the differences for individual languages may be large.", + "bbox": [ + 169, + 369, + 826, + 494 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experimental setup", + "text_level": 1, + "bbox": [ + 171, + 516, + 372, + 535 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We test and showcase our benchmark by running experiments with 11 different models of varying sizes, that were all marketed to have multilingual abilities.", + "bbox": [ + 169, + 547, + 823, + 578 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Models", + "text_level": 1, + "bbox": [ + 171, + 589, + 266, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To test the extent to which MultiLoKo provides useful signal across training stages, we consider both base and chat models. 
The base models we include in our experiments are Llama 3.1 70B and 405B (Dubey et al., 2024), Mixtral 8x22B (team, 2024), and Qwen 2.5 72B (Qwen et al., 2025), the seven chat models are Gemini 2.0 Flash (Google DeepMind, 2024), GPT4-o (OpenAI et al., 2024), Claude 3.5 Sonnet (Anthropic, 2025), Llama 3.1 70B and 405B Chat, Mixtral 8x22B-it, and Qwen 2.5 72B instruct. As mentioned before, we run chat and base models with separate prompts.", + "bbox": [ + 169, + 614, + 823, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Experimental setup", + "text_level": 1, + "bbox": [ + 171, + 718, + 351, + 734 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We run all of our experiments with the generation temperature set to 0. To facilitate automatic evaluation, we include an instruction to answer questions curtly and precisely, producing only a number/name/location/etc. Full template information can be found in our github repository.", + "bbox": [ + 169, + 744, + 823, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Few-shot prompting For base models we use a 5-shot prompt. For chat models, we use a 0-shot prompt, as this is the most likely use mode by chat model users.", + "bbox": [ + 169, + 792, + 823, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Post-processing Because base models are good at following the instructions, minimal postprocessing is needed: we only lowercase the output and strip punctuation. Chat models often deviate from the required format, especially in English, in various ways that we discuss in Appendix B. To evaluate such models beyond their instruction-following issues, we perform more complex post-processing, aiming to remove any words resembling \"answer\" from the LLM output, as well as several special cases for English and Japanese. 
We provide full details about post-processing in Appendix C.", + "bbox": [ + 169, + 828, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/187a2566712d7e7b5269313bad6b1bd1cade176fdb265f30224cbd55ff0b8bdc.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelEMGapMother tongue effectLocality effect
Gemini 2.0 Flash34.39± 2.9034.806.12± 1.900.36± 3.40
Llama 3.1 405B34.31± 2.7039.206.37± 1.700.62± 2.70
GPT4-o33.97± 3.6048.803.08± 2.000.35± 2.90
Llama 3.1 405B Chat27.70± 3.2040.803.97± 2.20-1.11± 2.70
Llama 3.1 70B26.92± 2.6028.802.72± 1.70-0.30± 3.10
Claude 3.5 Sonnet26.89± 4.4047.6024.18± 4.200.81± 2.90
Llama 3.1 70B Chat21.65± 2.8042.400.49± 1.60-3.32± 3.30
Mixtral 8x22B21.64± 4.2043.60-2.18± 3.00-0.65± 2.60
Qwen2.5 72B19.66± 2.3028.402.45± 2.10-2.28± 2.70
Mixtral 8x22B-it10.10± 3.1039.20-5.41± 2.00-0.54± 1.70
Qwen2.5 72B instruct2.54± 0.708.00-1.52± 1.000.43± 0.70
", + "bbox": [ + 194, + 88, + 794, + 271 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Aggregate results dev. We report average EM, gap, mother tongue effect and locality effect for all 11 models on the MultiLoKo dev split. For EM, MTE and LE, we also indicate a confidence interval equal to two times the standard error across languages. Models are sorted by average EM.", + "bbox": [ + 169, + 275, + 823, + 316 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Results", + "text_level": 1, + "bbox": [ + 171, + 349, + 267, + 366 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As MultiLoKo has several partitions, there are many different results that can be computed. On a high level, we consider four different types of results. First, in § 5.1, we report average model results across several categories, including the average performance and an indicator of parity across languages. Next, in § 5.2, we dive deeper into the knowledge transfer occurring from one language to another, within individual models. In § 5.3, instead, we focus on differences between individual languages. Lastly, in § 5.4, we look in more detail at the dataset itself through the lens of model results, considering in particular the effect of locally sourcing data as opposed to translating English sourced data (§ 5.4.1), differences between our dev and test split (§ 5.4.2) and the difference between using human and machine translated data (§ 5.4.3).", + "bbox": [ + 169, + 381, + 823, + 506 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Aggregate results: EM and language gap", + "text_level": 1, + "bbox": [ + 171, + 522, + 498, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Table 2, we provide a summary of the average dev results. 
Specifically, for each model, we report average EM and the gap between the best and the worst language, along with average MTE and LE, which we will discuss in a later section.7 We report average MTE, EM and LE along with a confidence interval equal to two times the standard error across languages, roughly equalling previously used $95\\%$ confidence intervals (Madaan et al., 2024; Dubey et al., 2024).", + "bbox": [ + 169, + 547, + 823, + 618 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1.1 Model performance (EM)", + "text_level": 1, + "bbox": [ + 171, + 632, + 405, + 647 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Figure 1a, we show a boxplot of the distribution of the EM scores across models, ordered by average EM. The best performing models are Gemini 2.0 Flash, Llama 3.1 405B, and GPT4-o, while Mixtras 8x22B and the Qwen2.5 72B populate the lower rankings on the list. Somewhat surprisingly, base models are generally outperforming chat models on the benchmark, this is partly due to false refusals and poor instruction following in the chat models. In some cases, however, the chat models simply just provide a qualitatively different answer than the base models. The figure shows that MultiLoKo is a relatively difficult benchmark across the board: the average EM of even the best performing model barely exceeds 30, while the bottom performing models have EM scores lower than 20. Also scores for the easiest languages (see also § 5.3) are capped below 50. 
Furthermore, for virtually all models performance varies starkly between languages, suggesting that none of the models we considered are evenly multilingual across the 31 languages in MultiLoKo.", + "bbox": [ + 169, + 655, + 826, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1.2 Gap", + "text_level": 1, + "bbox": [ + 171, + 821, + 256, + 837 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "While average EM score provides some information about a model's multilingual abilities, the same EM score can hide many different patterns regarding individual language scores. As we appreciate it is not always practical to consider 31 separate EM scores in model development, we add a second", + "bbox": [ + 169, + 845, + 823, + 888 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "7A metric cheatsheet can be found in Table 1.", + "bbox": [ + 189, + 896, + 462, + 910 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ba1c71502e3d1b62d80b80df10464eeef839823bfd41082afe99302cc0c1fa68.jpg", + "image_caption": [ + "(a) EM scores" + ], + "image_footnote": [], + "bbox": [ + 178, + 89, + 524, + 281 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cc7d7f7d7a0ea73f1bd888448d909a38821f0cdcc30c4807be351475caeba83c.jpg", + "image_caption": [ + "(b) Gap", + "Figure 1: EM distributions and Gap dev. (a) Boxplot of observed EM scores for each model, sorted by mean. (b) Difference between the best EM and the worst of the N next best EM scores, per model." 
+ ], + "image_footnote": [], + "bbox": [ + 531, + 92, + 823, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "summary metric to the main metrics of MultiLoKo: the gap between the best and worst performing languages, representative of the extent to which a model has achieved parity across languages.", + "bbox": [ + 169, + 362, + 823, + 393 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Figure 1a, we already saw that the per-language scores have quite a range for all models. In Figure 1b, we study this in more detail, by considering the gap between the best language and the next N best language (30 corresponds to the full benchmark). On the right end of the plot, we see that already considering only 5 languages besides English, even the best perform has a gap of over five points - relatively large in absolute terms, very large in relative ones - between English and the worst of the remaining languages. For the second best two models, the top-5 gap even exceeds 10 points. As we include more languages, up to the full benchmark, the gap increases, with GPT4-0 showing gap of almost 50 points. 
The only models for which the gap is small are the models that have overall low performance and thus little space to drop from English, illustrating how gap and average EM provide complementary information about multilingual performance.", + "bbox": [ + 169, + 397, + 826, + 537 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2 Generalisation across languages", + "text_level": 1, + "bbox": [ + 171, + 551, + 434, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next, we study whether knowledge generalises across languages or, in other words, whether knowledge transfers from one language to another.", + "bbox": [ + 169, + 577, + 826, + 606 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2.1 The mother tongue effect (MTE)", + "text_level": 1, + "bbox": [ + 171, + 619, + 452, + 633 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "First, we compare the EM of models when questions are asked in the language for which the questions were originally sourced with performance when the same questions are asked in English. We quantify this effect with the metric MTE, which expresses the difference in performance between these two settings (see § 3.3). In Figure 2a, we show MTE per language, averaged across models. For most languages, performance is higher when the question is asked in the language for which the question is locally relevant. The languages for which MTE is negative or close to 0 are virtually all languages that perform very poorly also in the mother tongue and for which there is therefore little room for further decrease. From one perspective, the improvements when questions are asked in the low-resource but native languages can be seen as surprising: as models perform much better in English than non-English languages, one may expect performances to go up as a consequence of that. On the other hand, similar 'mother tongue effects' have been observed in earlier studies. For example, Ohmer et al. 
(2024) found that models are comparatively better at answering factual questions about topics when they are asked in a language to which culture the fact pertains. It appears that also in our case,", + "bbox": [ + 169, + 643, + 826, + 825 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "With the used prompt, we could not get Claude 3.5 Sonnet to answer questions in English in an automatically parsable manner, leading to an abysmal score of 4.8 on the English sourced data and equally low scores on data translated into English. Examples of this issue can be found in Appendix B. Because this issue is not indicative of lack of knowledge or transfer, we excluded Claude 3.5 Sonnet from any of the transfer results in this section.", + "bbox": [ + 169, + 833, + 823, + 883 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "The exception to this rule is Hindi, which has a reasonable performance in the native language but nevertheless improves in English. We further discuss such language-specific points in $\\S 5.3$ .", + "bbox": [ + 169, + 883, + 823, + 911 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5e9488ee42bcffe9751990decbf76ca92024b4aaa74c5ac9b05c6f80f75e0642.jpg", + "image_caption": [ + "(a) Average MTE across models", + "(b) KDE of MTE scores" + ], + "image_footnote": [], + "bbox": [ + 176, + 92, + 591, + 267 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/02b5749be8a0b0cad9681d40427e51c88c0bc0e81cda587783d142cb140bf240.jpg", + "image_caption": [ + "Figure 2: Mother tongue effect dev. (a) Per language MTE for MultiLoKo dev, indicating the difference between questions asked in the mother tongue (locally relevant) and in English. Error bars indicate 2 times standard error across all models, excluding Claude 3.5 Sonnet. 
(b) KDE plot of the distribution of MTE scores for the top-3 performing models." + ], + "image_footnote": [], + "bbox": [ + 606, + 95, + 803, + 237 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the effect of accessibility of information in a relevant language wins out over the generally stronger English performance, pointing to a gap in models' ability to generalise knowledge from one language to another.", + "bbox": [ + 169, + 380, + 823, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 2b, we further consider the distribution of MTE scores for the top-3 models. Interestingly, this distribution is quite different between models. Despite having comparable average scores, the top-3 performing models differ in their MTE distributions across languages. Of the three models, GPT4-o has the smallest average effect (3.2); Llama 3.1 405B has a much higher average effect (6.6), but less probability mass on the more extreme ranges of the spectrum (min max values of $[-7, +12]$ vs $[-9, +13]$ ) Gemini 2.0 Flash is in the middle in terms of average (6.3), but shows the largest variation across languages $[-10, +16]$ .", + "bbox": [ + 169, + 429, + 826, + 527 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Note, however, that without studying the actual training data of the various models, it is possible to infer that all these models have relatively poor transfer across languages, but not conclusively say that one model is better than another: it is also possible that the information sourced for languages with better MTEs was simply better represented in the English data of a respective model.", + "bbox": [ + 169, + 532, + 823, + 589 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2.2 Consistency across responses", + "text_level": 1, + "bbox": [ + 171, + 609, + 426, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Another way to study transfer between languages is to look at the consistency of 
responses across languages (previously also used by Qi et al., 2023; Ohmer et al., 2023, i.a.). After all, it is possible for a model that has an EM of 30 on both English and another language to be nevertheless completely misaligned on which questions they respond to correctly. Studying consistency across responses can therefore be seen as a more direct way of studying whether knowledge is equally accessible across languages. Furthermore, consistency can be studied independently from accuracy, as it is possible for a model to have very good transfer, but be simply consistently wrong.", + "bbox": [ + 169, + 635, + 823, + 733 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In the dataset used by Ohmer et al. (2023), the correct answers (consisting of names, numbers and years) are identical across the languages they consider, while Qi et al. (2023) use a factual knowledge task that requires ranking outputs. Neither of their metrics can thus be directly applied in our case. Specifically, measuring consistency on incorrect responses – an important component of the work of Ohmer et al. (2023) because it can provide positive rather than negative evidence – would require assessing whether two answers in different languages are to be considered semantically equivalent, which is not practically feasible for our data. Rather, we opt for a simpler consistency metric, which quantifies what percentage of the questions that are answered correctly in either language are answered correctly in both languages.", + "bbox": [ + 169, + 738, + 826, + 864 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 3a, we show the average consistency of all models (excluding again Claude Sonnet 3.5); for completeness, we also show the per-language consistency results in Figure 3b. 
The results confirm our earlier conclusion that much improvements can be made when it comes to knowledge transfer", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b0918ad445a1cd7b34ff620e97c14ae4d7e0eab4778d0e71993ce777ae50542b.jpg", + "table_caption": [], + "table_footnote": [ + "(a) Consistency scores per model" + ], + "table_body": "
ModelConsistency
Gemini 2.0 Flash0.46± 0.04
Llama 3.1 405B0.46± 0.04
Llama 3.1 70B0.45± 0.03
GPT4-o0.45± 0.05
Llama 3.1 405B Chat0.42± 0.04
Qwen2.5 72B0.40± 0.04
Llama 3.1 70B Chat0.40± 0.04
Mixtral 8x22B0.36± 0.05
Mixtral 8x22B-it0.21± 0.05
Qwen2.5 72B instruct0.08± 0.03
", + "bbox": [ + 176, + 93, + 390, + 227 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4292a85211fc2019ae5f9eb0d70959896e6e3c18041a561b5072bafe1218ace8.jpg", + "image_caption": [ + "(b) Consistency scores per language", + "Figure 3: Consistency results dev. (a) Average per-model consistency scores, $\\pm 2$ times the standard error across languages. (b) Boxplot of model consistency scores per language, indicating the relative overlap of correctly answered questions when asked in the mother tongue vs in English." + ], + "image_footnote": [], + "bbox": [ + 403, + 92, + 820, + 265 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "between languages: even for the best performing models, there is an overlap of not even $50\\%$ between the questions correctly answered across languages.", + "bbox": [ + 169, + 359, + 823, + 390 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.3 Differences between languages", + "text_level": 1, + "bbox": [ + 171, + 405, + 426, + 420 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "So far, with the exception of MTE and parity scores, we have primarily looked at results averaged across languages. Now, we consider language-specific results in a bit more detail.", + "bbox": [ + 169, + 430, + 823, + 459 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/5a773333e4ab35574de8f34ba1e9ec4e4d7fb66080d19020a6de0b49c9e62b74.jpg", + "image_caption": [ + "Figure 4: Average EM per language dev, in mother tongue and English. Top: Average EM on locally sourced data. Bottom: Average EM on locally sourced data, translated to English." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 474, + 818, + 767 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.3.1 Language difficulty on locally sourced data", + "text_level": 1, + "bbox": [ + 171, + 830, + 526, + 847 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "First, in Figure 4 (top), we show average model results for all languages on all locally sourced data. In broad strokes, the order of difficulty is correlated with how low- or high- resource a language is to be considered: while languages such as French, English and Spanish occur at the easier end of the spectrum, we find Farsi, Khmer and Malay among the most difficult languages. There are a few", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e5f1a6301f9eacb88d277334073c9d3bc06a6bceaf152447d2047710024fbe06.jpg", + "image_caption": [ + "(a) Locality effect per language", + "Figure 5: Locality Effect dev. (a) Per language Locality Effect, indicating the difference in assigned scores between locally sourced and translated English data. A positive LE means the locally sourced data has a higher score (is easier), a negative LE the English sourced data has a higher score. (b) Per-model rank correlation between language difficulty of languages on locally sourced vs English translated data." + ], + "image_footnote": [], + "bbox": [ + 186, + 90, + 580, + 258 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/5d86d006a27cdf31406eb72a1b910b6a9d13150c13b3ee9064c1db294d2d41f2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelRank correlation language difficulty
Gemini 2.0 Flash0.54
Llama 3.1 405B0.65
GPT4-o0.64
Llama 3.1 405B Chat0.70
Llama 3.1 70B0.60
Claude 3.5 Sonnet0.84
Llama 3.1 70B Chat0.68
Mixtral 8x22B0.86
Qwen2.5 72B0.45
Mixtral 8x22B-it0.88
Qwen2.5 72B instruct0.55
", + "bbox": [ + 602, + 95, + 797, + 218 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "(b) Language difficulty correlations", + "bbox": [ + 596, + 258, + 807, + 273 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "notable exceptions: on average the second highest scoring language in our benchmark is Tagalog. While it is difficult to judge why without doing a detailed analysis on the questions, we hypothesise that the questions asked by the Tagalog language experts are simply less complex than the questions of other languages.", + "bbox": [ + 169, + 378, + 823, + 434 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.3.2 Separating language difficulty from language proficiency", + "text_level": 1, + "bbox": [ + 171, + 449, + 619, + 465 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In an attempt to distinguish data difficulty from language proficiency, we consider also the difficulty of the locally sourced data translated to English. While this conflates data difficulty and transfer (see § 5.2), it still gives us some indication of the extent to which low performance in languages is caused by poor language proficiency versus data difficulty. In the bottom half of Figure 4, we show the model performances as computed on the locally sourced data translated to English. The correlation between these two language difficulty rankings between these setups is 0.79. When comparing the ranks of the various languages, only a handful of languages shift more than a few places. Specifically, Bengali $(26->4)$ , Urdu $(26->12)$ , and Hindi $(14->5)$ all decrease substantially in difficulty rank. The fact that they are comparatively easier in English suggests that for those languages proficiency may be a larger problem than data difficulty. 
On the other hand, only Russian $(7->21)$ shows a drop of more than 5 places.", + "bbox": [ + 169, + 473, + 826, + 626 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.4 The dataset", + "text_level": 1, + "bbox": [ + 171, + 642, + 294, + 656 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Lastly, we discuss two aspects related to the creation of the dataset. Specifically, we consider the impact of local sourcing vs translated English data, and we have a look at the dataset split across dev and test. We consider the difference between using human-authored as opposed to machine-authored translations.", + "bbox": [ + 169, + 667, + 823, + 724 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.4.1 Locally-sourced vs translated-from-English data", + "text_level": 1, + "bbox": [ + 171, + 739, + 562, + 753 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To study the impact of using locally sourced data, we consider the difference between per-language EM on locally sourced data and translated from English data.", + "bbox": [ + 169, + 763, + 823, + 792 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Language difficulty First, we look at per-language differences between locally sourced and translated English data. We quantify this difference in a metric we call the Locality Effect (LE). The size of the locality effect tells us how much the estimate of a model's strength in a particular language would have been off if we had chosen to use a translated benchmark rather than a locally sourced one. We plot this difference in Figure 5a.", + "bbox": [ + 169, + 806, + 826, + 878 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As we can see, the scores between locally and translated English-sourced data can differ quite drastically, almost 15 percentage points averaged across models. 
For individual models, the differences are", + "bbox": [ + 169, + 883, + 826, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "even larger. For Llama 3.1 405B, the locality effect ranges from -13 to +17; for Gemini 2.0 Flash from -21 to +15; and for GPT4-o from -22 to +14. The differences are not just in absolute scores; also the ordering of language by difficulty is quite different across the two data collection setups, as can be seen by the per-model rank correlations of language difficulty between the two conditions, shown in Figure 5b. Using English-translated rather than locally sourced data does thus not only provide different estimates, but may suggest different languages to focus on for improvement.", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Model rankings Next, we consider the ranking of the models under the two different data regimes. Interestingly, given the transfer effect, changing from locally to English translated data does not make any difference in the ranking. Also in terms of absolute scores, the difference between the two data collection setups is relatively minor. At least for our type of data, it thus appears that using translated data as opposed to locally sourced data may be a reasonable setup for comparing models on average, though not for getting adequate per-language or set language prioritisation.", + "bbox": [ + 169, + 188, + 826, + 273 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.4.2 The dataset split", + "text_level": 1, + "bbox": [ + 171, + 286, + 341, + 301 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As mentioned in the dataset construction, we took the deliberate decision to generate a split based on topic frequency, rather than creating a random split. 
The aim of this out-of-distribution split is to test generalisation to topics that are more in the tail of the distribution, as well as encourage improvements in multilinguality beyond having a higher score on the specific released MultiLoKo dev set. Of course, however, because of our sourcing method, all the topics in MultiLoKo are topics on which information is available on Wikipedia. As training data, Wikipedia is often packaged as a single scrape, this may render our deliberate splitting efforts futile: the fact that a page is less visited does not make it less likely that the specific page is included in the training data. Now, we test if the dev and test split are in fact distributionally different.", + "bbox": [ + 169, + 310, + 823, + 434 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/b801df6ef4552fe7a4acd00f70e7b0057dc62f87938a607d60d51710da9f7fbd.jpg", + "image_caption": [ + "Figure 6: Average EM, dev versus test. We show the difference in score distributions between the MultiLoKo dev and test set. The results confirm that the test set is indeed out of distribution with respect to the dev set: dev scores (upper bars) are higher across the board." + ], + "image_footnote": [], + "bbox": [ + 181, + 450, + 818, + 683 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In Figure 6, we show boxplots of dev and test EM scores for all models under consideration. The plot confirms that the split is indeed to be considered an OOD split: for virtually much all models, the test scores are lower than the dev scores. Across all models, the average dev score is 24, whereas the average test score is 21. This suggests that our test set does indeed contain more tail knowledge than the dev set, despite the aforementioned arguments regarding Wikipedia. 
Interestingly, this implies that Wikipedia may not be the primary source from which models learn this information.", + "bbox": [ + 169, + 752, + 823, + 835 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The difference in difficulty also has bearing on the other metrics: the parity scores (thus: the gap between the best and worst performing language) is 37 for dev vs 34 for test, suggesting that more difficult dat may to some extent hide differences between languages and therefore exemplifying the utility of considering parity along with overall performance. The mother tongue effect, on the other hand, is comparable across dev and test (1.61 vs 1.56, respectively). For the locality effect, the", + "bbox": [ + 169, + 842, + 823, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "effect is less interpretable. While the average difference is substantial (-0.6 dev vs -1.9 test), there is no clear pattern discernable across languages: for some, the effect reduces, whereas for others it increases.", + "bbox": [ + 169, + 90, + 823, + 132 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/07d531440daa7d359f135908c66b8f86aff2111a8ae67e0aa29fd27c7f2f0eeb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelRmin Δmax Δavg Δ
Gemini 2.0 Flash0.80-10.0021.604.35
Llama 3.1 405B0.83-4.4018.805.82
GPT4-o0.85-6.0021.604.46
Llama 3.1 405B Chat0.80-10.4022.403.08
Llama 3.1 70B0.77-7.6022.004.59
Claude 3.5 Sonnet0.90-9.6020.802.84
Llama 3.1 70B Chat0.87-6.0020.003.12
Mixtral 8x22B0.91-3.2020.004.13
Qwen2.5 72B0.83-4.0016.803.47
Mixtral 8x22B-it0.92-4.8012.402.41
Qwen2.5 72B instruct0.80-0.803.200.36
", + "bbox": [ + 179, + 157, + 405, + 258 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/58b5ac41956d6eb877cd3c5fb7488a56cf7367f079130be27c34bff2cd0898d3.jpg", + "image_caption": [ + "(b) MT vs human translations", + "Figure 7: Machine versus human translations dev. (a) Per-model rank correlation between language difficulty between MT and human translations, and min, max and average difference between the two conditions. (b) Difference between EM computed on human- and machine-translated data (human score - machine score), per language." + ], + "image_footnote": [], + "bbox": [ + 429, + 148, + 795, + 303 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(a) Language difficulty stats across human- and machine translations", + "bbox": [ + 169, + 300, + 411, + 325 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.4.3 Human versus machine translation", + "text_level": 1, + "bbox": [ + 171, + 416, + 470, + 429 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Lastly, we consider the impact of using machine- or human-authored translations. To do so, we look at the differences in EM scores between machine and human translated data for the various languages, taking the human translations as the 'gold standard' (i.e. we consider human translated EM - machine translated EM). We show the results in Figure 7.", + "bbox": [ + 169, + 439, + 823, + 494 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In Figure 7a we show the rank correlations of the difficulties of the various languages per model, as well as the min, max and average drop from human to machine translations. We see the that, at the model level, using machine translations rather than human translations results in a systematic undervaluation of the model scores: there is not a single model for which the 'drop' from human to machine translations is negative on average. 
In part, this is may be a result of the previously observed lack of knowledge transfer effect. That the drop is not substantially lower for models with better transfer, however, suggests that the more impactful factor is the quality of the machine translations, that may at times result in unanswerable questions.", + "bbox": [ + 169, + 501, + 825, + 612 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In terms of model rankings, the difference between machine and human translations is minor: the model rankings between the two conditions have a rank correlation of 0.97 on the dev split, with only three local swaps (2&3 and 5&6 and 8&9) of models that did not have statistically different scores to begin with. This suggests that to compare models, using machine translation can be an acceptable alternative to human translations, as the mis-estimation is systematic across models.", + "bbox": [ + 169, + 619, + 823, + 688 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Considering the effect across languages, we observe that even though the average drop is positive, for virtually all models there are at least some languages for which performance increases when MT is used, in some cases with even more than 10 points. For a handful of languages - specifically Russian, Swedish and Urdu - this is also true across models (see Figure 7b). While the overall rank correlation is high for language difficulty (0.88), it thus still urges caution in using machine translated data for language improvement prioritisation.", + "bbox": [ + 169, + 694, + 825, + 777 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 797, + 300, + 813 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Notwithstanding the increasing multinational deployment of LLMs in many parts of the world, adequately evaluating their multilinguality remains a challenging enterprise. 
Only in part is this due to the scarcity of high-quality and broad-coverage multilingual benchmarks for LLM: perhaps a more pressing issue is that the benchmarks that are frequently used for multilingual evaluation virtually all consist of translated English data. While using completely parallel data has its advantages, using translated English data imposes an English-centric bias on the content of the benchmarks,", + "bbox": [ + 169, + 827, + 826, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "implying that even if the benchmark evaluates multilinguality on the surface, it does not in content. In our work, we aim to address this by presenting MultiLoKo, a multilingual benchmark spanning 31 languages that combines the best of both worlds. MultiLoKo contains 500 questions targeting locally relevant knowledge for 31 languages, separately sourced for each language with a protocol specifically designed to ensure local relevance of the question topics. It is also fully parallel, because it contains human-authored translations of the non-English partitions into English and vice versa. As such, it allows to study various questions related to multilinguality, transfer and multilingual benchmark creation. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while the test set of the benchmarks remains private, at least for the near future.", + "bbox": [ + 169, + 90, + 826, + 217 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We use MultiLoKo to analyse 4 base and 7 chat models marketed to be multilingual. 
We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language, followed by Llama 3.1 405B and GPT4-o, which are close contenders in terms of average performance but both have substantially higher language gaps (39 and 49 points). Generally, scores are better when questions are asked in the language to which they are relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low per-sample consistency across question language.", + "bbox": [ + 169, + 222, + 826, + 335 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "On a meta-level, we study the relevance of using locally sourced data as opposed to translated English data as well as whether it matters if translations are authored by humans or machines. We find that the estimated difficulty of some languages changes drastically across the two sourcing setups, within the range of 15 points decrease and 8 points increase on average across models. The rank correlation between average language difficulty score is 0.78. Furthermore, individual model scores between locally and English-translated data can differ up to 22 points for some languages. However, changing the sourcing setup does not impact model rankings, suggesting that using translated data may be suitable for comparing models but less for model development or language prioritisation. For using machine- instead of human-authored translations, as well, the effect on model ranking is limited $(\\mathrm{R} = 0.97)$ , but the difficulty estimates of various languages changes with up to 12 points. 
Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to $34\\%$ of the human-translated scores.", + "bbox": [ + 169, + 339, + 826, + 505 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "While our results section is extensive already, there are still several parts of MultiLoKo that we did not explore. For instance, because of the sourcing strategy, each native question is coupled with a paragraph that contains the answer to the question. MultiLoKo could thus be transformed into a reading-comprehension benchmark, and we consider studying the difference between the knowledge and reading comprehension setup an interesting direction for future work. Furthermore, each question contains an elaborate long answer intended to explain the short answer. We have not used the long answers in any of our experiments, but foresee interesting directions including studies into CoT prompting or studying answer rationales.", + "bbox": [ + 169, + 511, + 826, + 625 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "7 Limitations", + "text_level": 1, + "bbox": [ + 171, + 646, + 305, + 662 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this last section, we discuss various limitations of our work.", + "bbox": [ + 171, + 679, + 584, + 694 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Local relevance In our sourcing protocol, we explicitly sought to create questions locally relevant to the respective languages. It is important to notice, however, that some languages, such as English, Spanish, Portuguese, Chinese, French and to a lesser extent German and Dutch cover a wide variety of cultures. 
We did not separately control for that and the data for those languages thus likely comprises a mix of different locales.", + "bbox": [ + 169, + 712, + 826, + 782 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Data quality Building a bias-free evaluation datasets with few mistakes is not an easy feat. Even though we implemented several rounds of quality checks in our data collection pipeline, when looking at outputs we still incidentally found mistakes in the data or answers. We fixed some of these mistakes as we encountered them, but it is quite likely that more such mistakes occur in the dataset. It is also important to point out that we are less likely to spot such issues for languages that we do not understand at all, potentially creating a bias towards the set of languages for which we have a rudimentary understanding. Overall, however, we believe that the pipeline we designed assures a dataset of high quality. Of course, we welcome reports of mistakes spotted by others in the data.", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation Because MultiLoKo is a generative benchmark, computing scores requires comparisons of a generated answer with a set of gold answers. A first obstacle to this method of evaluation is that it is hard to create an exhaustive list of correct short-form answers. This is especially true when the correct answer is not a number, date, title or something else that can be expressed only in a few ways. In addition to that, it is hard to incentivise LLMs to produce concise answers. Even when instructed to answer with only a number / date / name / title, they may respond with a full sentence, add a reasoning trail to their answer, or add words beyond the minimal answer in a different fashion. 
We addressed such issues that were systematic in post-processing (see Appendix B), but it is hard to a priori catch all the ways that LLMs may deviate from the requested protocols. In some cases, we found additional post-processing steps that increased the scores of some models only later in the process, because scores for particular languages looked suspiciously low. For instance, we had not initially realised that our punctuation stripper did not strip punctuation in Urdu, which specifically influenced GPT4-o and Gemini. We considered several other metrics as well as judges, but eventually found that EM provided the clearest and least biased signal. It remains, however, a challenge to evaluate chatty LLMs completely independently from their ability to follow instructions.",
+ "bbox": [
+ 174,
+ 90,
+ 823,
+ 297
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "text",
+ "text": "Wikipedia as information source MultiLoKo, as several other both multilingual as well as monolingual benchmarks, uses Wikipedia as main source of information. This has the advantage that Wikipedia has a large coverage across many different languages and the information is considered to be of high quality. It also facilitates comparable sourcing across languages. Of course, it also poses limitations. For one, it still provides a bias to the specific topics that can be included, that are usually primarily knowledge based. In fact, MultiLoKo is indeed a knowledge benchmark; it does not consider other types of skills. Secondly, and perhaps more importantly, Wikipedia is a corpus frequently used in the training data of LLMs. The fact that MultiLoKo is a challenging benchmark even given that (multilingual) wikipedia is likely included in the training data of most of the LLMs evaluated suggests that this is not a large issue at the moment. 
However, it is very possible that MultiLoKo can be 'hacked' relatively easily simply by strongly oversampling multilingual wikipedia data.", + "bbox": [ + 174, + 321, + 823, + 486 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 174, + 516, + 336, + 534 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "While this paper knows only two authors, this benchmark would not have been possible without the support and contributions of many people. We wish to thank all of them in this last section. First, we thank Van Phung, Kriz Chan, Antonio Gai, Dunant Hin and Emily Du for their support on facilitating and streamlining interactions with vendors for the data collection process, and Milena Hoffman for her indispensable administrative support in managing the data collection process. We would furthermore like to thank Van Phung and Kriz Chan for their continued help on ensuring data quality, saliency checking output, brainstorming and general support throughout the creation of the benchmark.", + "bbox": [ + 174, + 551, + 823, + 662 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We also thank the linguists that helped us for their contributions to the analysis of the pilot questions in the benchmark, which played an important role in finetuning and improving our annotation protocol as well as disregard inappropriate questions, and for helping us design prompt templates to allow language-specific querying of models in different stages for each of the languages in MultiLoKo. 
Specifically, we would like to thank Abdul Haque (Urdu), Aleksandra Antokhina (Russian), Ananya Banerjee (Bengali), Firman Tahar (Indonesian), Florian Mouret (French), Francisco Paredes Maldonado (Spanish), Eriko Nakamura (Japanese), Julie Lee (Korean), Khanh Tien (Vietnamese), Miao Yeh (Traditional Chinese), Renata Barboza (Portuguese), Rishabh Goel (Hindi), Sanket Suhas Satope (Marathi), Sara Martellini (Italian) and Silvia Aponte (German). We thank Kriz Chan by streamlining our collaboration with these linguists, and Maria Paez Playa for offering her teams time on this enterprise. We furthermore thank Sabrina Qiao for providing resources for quick-turnaround QA support, and Ateeq Awan (English), Kaila Conley-Coversi (Italian), Semanti Roy (Bengali) and Shahmir Shaikh (English) for delivering this QA support.", + "bbox": [ + 174, + 667, + 823, + 849 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As doing manual saliency checks is challenging for a multilingual benchmark, we also relied on the help of several colleagues to debug small issues, detect errors in questions and prompts and double check annotation judgements. We would like to thank Anna Prochowska, Daria Dudurca, Diego Perino, Etai Sella, Ivan John Piramide, Lovish Madaan, Yanir Kleiman for their help on this.", + "bbox": [ + 174, + 854, + 821, + 910 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 491, + 936, + 506, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Fakhraddin Alwajih, Gagan Bhatia, and Muhammad Abdul-Mageed. Dallah: A dialect-aware multimodal large language model for Arabic. 
In Nizar Habash, Houda Bouamor, Ramy Eskander, Nadi Tomeh, Ibrahim Abu Farha, Ahmed Abdelali, Samia Touileb, Injy Hamed, Yaser Onaizan, Bashar Alhafni, Wissam Antoun, Salam Khalifa, Hatem Haddad, Imed Zitouni, Badr AlKhamissi, Rawan Almatham, and Khalil Mrini, editors, Proceedings of The Second Arabic Natural Language Processing Conference, pages 320-336, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.arabicnlp-1.27. URL https://aclanthology.org/2024.arabicnlp-1.27/.", + "Anthropic. Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet, 2025. Accessed: 2025-04-11.", + "Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. On the cross-lingual transferability of monolingual representations. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4623–4637, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.421. URL https://aclanthology.org/2020.acl-main.421/.", + "Lucas Bandarkar, Davis Liang, Benjamin Muller, Mikel Artetxe, Satya Narayan Shukla, Donald Husa, Naman Goyal, Abhinandan Krishnan, Luke Zettlemoyer, and Madian Khabsa. The belebele benchmark: a parallel reading comprehension dataset in 122 language variants. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 749-775, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.44. URL https://aclanthology.org/2024.acl-long.44/.", + "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. 
Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/1457c0d6bcbd4967418bf8ac142f64a-Abstract.html.",
+ "Pinzhen Chen, Simon Yu, Zhicheng Guo, and Barry Haddow. Is it good data for multilingual instruction tuning or just bad multilingual evaluation for large language models? CoRR, abs/2406.12822, 2024. URL https://doi.org/10.48550/arXiv.2406.12822.",
+ "Zhihong Chen, Shuo Yan, Juhao Liang, Feng Jiang, Xiangbo Wu, Fei Yu, Guiming Hardy Chen, Junying Chen, Hongbo Zhang, Li Jianquan, et al. Multilingualsift: Multilingual supervised instruction fine-tuning, 2023. URL https://arxiv.org/pdf/2412.15115.",
+ "Jonathan H. Clark, Eunsol Choi, Michael Collins, Dan Garrette, Tom Kwiatkowski, Vitaly Nikolaev, and Jennimaria Palomaki. TyDi QA: A benchmark for information-seeking question answering in typologically diverse languages. Transactions of the Association for Computational Linguistics, 8: 454-470, 2020. doi: 10.1162/tacl_a_00317. URL https://aclanthology.org/2020.tacl-1.30/.",
+ "Alexis Conneau, Ruty Rinott, Guillaume Lample, Adina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. XNLI: Evaluating cross-lingual sentence representations. In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2475–2485, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1269. 
URL https://aclanthology.org/D18-1269/." + ], + "bbox": [ + 173, + 114, + 828, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Allonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. The llama 3 herd of models. CoRR, abs/2407.21783, 2024. doi: 10.48550/ARXIV.2407.21783. 
URL https://doi.org/10.48550/arXiv.2407.21783.", + "Alena Fenogenova, Artem Chervyakov, Nikita Martynov, Anastasia Kozlova, Maria Tikhonova, Albina Akhmetgareeva, Anton Emelyanov, Denis Shevelev, Pavel Lebedev, Leonid Sinev, Ulyana Isaeva, Katerina Kolomeytseva, Daniil Moskovskiy, Elizaveta Goncharova, Nikita Savushkin, Polina Mikhailova, Anastasia Minaeva, Denis Dimitrov, Alexander Panchenko, and Sergey Markov. MERA: A comprehensive LLM evaluation in Russian. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9920–9948, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.534. URL https://aclanthology.org/2024.acl-long.534/.", + "Google DeepMind. Google gemini ai update - December 2024. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024. Accessed: 2025-04-11.", + "Naman Goyal, Cynthia Gao, Vishrav Chaudhary, Peng-Jen Chen, Guillaume Wenzek, Da Ju, Sanjana Krishnan, Marc'Aurelio Ranzato, Francisco Guzmán, and Angela Fan. The Flores-101 evaluation benchmark for low-resource and multilingual machine translation. Transactions of the Association for Computational Linguistics, 10:522-538, 2022. doi: 10.1162/tacl_a_00474. URL https://aclanthology.org/2022.tacl-1.30/.", + "Momchil Hardalov, Todor Mihaylov, Dimitrina Zlatkova, Yoan Dinkov, Ivan Koychev, and Preslav Nakov. EXAMS: A multi-subject high school examinations dataset for cross-lingual and multilingual question answering. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5427-5444, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.438. 
URL https://aclanthology.org/2020.emnlp-main.438/.", + "Yun He, Di Jin, Chaoqi Wang, Chloe Bi, Karishma Mandyam, Hejia Zhang, Chen Zhu, Ning Li, Tengyu Xu, Hongjiang Lv, Shruti Bhosale, Chenguang Zhu, Karthik Abinav Sankararaman, Eryk Helenowski, Melanie Kambadur, Aditya Tayade, Hao Ma, Han Fang, and Sinong Wang. Multi-if: Benchmarking llms on multi-turn and multilingual instructions following. CoRR, abs/2410.15553, 2024. doi: 10.48550/ARXIV.2410.15553. URL https://doi.org/10.48550/arXiv.2410.15553.", + "Dieuwke Hupkes, Mario Giulianielli, Verna Dankers, Mikel Artetxe, Yanai Elazar, Tiago Pimentel, Christos Christodoulopoulos, Karim Lasri, Naomi Saphra, Arabella Sinclair, et al. A taxonomy and review of generalization research in nlp. Nature Machine Intelligence, 5(10):1161-1174, 2023.", + "Zhengbao Jiang, Antonios Anastasopoulos, Jun Araki, Haibo Ding, and Graham Neubig. X-FACTR: Multilingual factual knowledge retrieval from pretrained language models. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical" + ], + "bbox": [ + 171, + 90, + 826, + 911 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Methods in Natural Language Processing (EMNLP), pages 5943-5959, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.479. URL https://aclanthology.org/2020.emnlp-main.479/.", + "Jaap Jumelet, Leonie Weissweiler, and Arianna Bisazza. Multiblimp 1.0: A massively multilingual benchmark of linguistic minimal pairs. CoRR, abs/2504.02768, 2025. URL https://doi.org/10.48550/arXiv.2504.02768.", + "Nora Kassner, Philipp Duffer, and Hinrich Schütze. Multilingual LAMA: Investigating knowledge in multilingual pretrained language models. 
In Paola Merlo, Jorg Tiedemann, and Reut Tsarfaty, editors, Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 3250-3258, Online, April 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.eacl-main.284. URL https://aclanthology.org/2021.eacl-main.284/.",
+ "Fajri Koto, Nurul Aisyah, Haonan Li, and Timothy Baldwin. Large language models only pass primary school exams in Indonesia: A comprehensive test on IndoMMLU. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12359-12374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.760. URL https://aclanthology.org/2023.emnlp-main.760/.",
+ "Patrick Lewis, Barlas Oguz, Ruty Rinott, Sebastian Riedel, and Holger Schwenk. MLQA: Evaluating cross-lingual extractive question answering. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7315–7330, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.653. URL https://aclanthology.org/2020.acl-main.653/.",
+ "Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. CMMLU: Measuring massive multitask language understanding in Chinese. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 11260–11285, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.671. 
URL https://aclanthology.org/2024.findings-acl.671/.", + "Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, and Xian Li. Few-shot learning with multilingual generative language models. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 9019-9052, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.616. URL https://aclanthology.org/2022.emnlp-main.616/.", + "Lovish Madaan, Aaditya K Singh, Ryan Schaeffer, Andrew Poulton, Sanmi Koyejo, Pontus Stenetorp, Sharan Narang, and Dieuwke Hupkes. Quantifying variance in evaluation benchmarks. arXiv preprint arXiv:2406.10229, 2024.", + "Moran Mizrahi, Guy Kaplan, Dan Malkin, Rotem Dror, Dafna Shahaf, and Gabriel Stanovsky. State of what art? a call for multi-prompt LLM evaluation. Transactions of the Association for Computational Linguistics, 12:933-949, 2024. doi: 10.1162/tacl_a_00681. URL https://aclanthology.org/2024.tacl-1.52/.", + "Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15991-16111, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.891. 
URL https://aclanthology.org/2023.acl-long.891/." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xenia Ohmer, Elia Bruni, and Dieuwke Hupkes. Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses. In Sebastian Gehrmann, Alex Wang, João Sedoc, Elizabeth Clark, Kaustubh Dhole, Khyathi Raghavi Chandu, Enrico Santus, and Hoorman Sedghamiz, editors, Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM), pages 258-276, Singapore, December 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.gem-1.22/.", + "Xenia Ohmer, Elia Bruni, and Dieuwke Hupke. From form(s) to meaning: Probing the semantic depths of language models using multisense consistency. Computational Linguistics, 50(4):1507-1556, 12 2024. ISSN 0891-2017. doi: 10.1162/coli_a_00529. URL https://doi.org/10.1162/coli_a_00529.", + "OpenAI. Mmmlu dataset. https://huggingface.co/datasets/openai/MMMLU, 2025. Accessed: 2025-04-11.", + "OpenAI,., Aaron Hurst, Adam Lerer, Adam P. 
Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Madry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alex Kirillov, Alex Nichol, Alex Paine, Alex Renzin, Alex Tachard Passos, Alexander Kirillov, Alex Christakis, Alexis Conneau, Ali Kamali, Allan Jabri, Allison Moyer, Allison Tam, Amadou Crookes, Amin Tootoochian, Amin Tootoonchian, Ananya Kumar, Andrea Vallone, Andrej Karpathy, Andrew Braunstein, Andrew Cann, Andrew Codispoti, Andrew Galu, Andrew Kondrich, Andrew Tulloch, Andrey Mishchenko, Angela Baek, Angela Jiang, Antoine Pelisse, Antonia Woodford, Anuj Gosalia, Arka Dhar, Ashley Pantuliano, Avi Nayak, Avital Oliver, Barret Zoph, Behrooz Ghorbani, Ben Leimberger, Ben Rossen, Ben Sokolowsky, Ben Wang, Benjamin Zweig, Beth Hoover, Blake Samic, Bob McGrew, Bobby Spero, Bogo Giertler, Bowen Cheng, Brad Lightcap, Brandon Walkin, Brendan Quinn, Brian Guarraci, Brian Hsu, Bright Kellogg, Brydon Eastman, Camillo Lugaresi, Carroll Wainwright, Cary Bassin, Cary Hudson, Casey Chu, Chad Nelson, Chak Li, Chan Jun Shern, Channing Conger, Charlotte Barette, Chelsea Voss, Chen Ding, Cheng Lu, Chong Zhang, Chris Beaumont, Chris Hallacy, Chris Koch, Christian Gibson, Christina Kim, Christine Choi, Christine McLeavey, Christopher Hesse, Claudia Fischer, Clemens Winter, Coley Czarnecki, Colin Jarvis, Colin Wei, Constantin Koumouzelis, Dane Sherburn, Daniel Kappler, Daniel Levin, Daniel Levy, David Carr, David Farhi, David Mely, David Robinson, David Sasaki, Kenny Jin, Dev Valladares, Dimitris Tsipras, Doug Li, Duc Phong Nguyen, Duncan Findlay Edede Oiwoh, Edmund Wong Ehsan Asdar Elizabeth Proehl Elizabeth Yang Eric Antonow Eric Kramer Eric Peterson Eric Sigler Eric Wallace Eugene Brevdo Evan Mays Farzad Khorasani Felipe Petroski Such Filippo Raso Francis Zhang Fred von Lohmann Freddie Sult Gabriel Goh Gene Oden Geoff Salmon Giulio Starace Greg Brockman Hadi Salman Haiming Bao Haitang Hu 
Hannah Wong Haoyu Wang Heather Schmidt Heather Whitney Heewoo Jun Hendrik Kirchner Henrique Ponde de Oliveira Pinto Hongyu Ren Huiwen Chang Hyung Won Chung Ian Kivlichan Ian O'Connell Ian O'Connell Ian Osband Ian Silber Ian Sohl Ibrahim Okuyucu Ikai Lan Ilya Kostrikov Ilya Sutskever Ingmar Kanitscheider Ishaan Gulrajani Jacob Coxon Jacob Menick Jakub Pachocki James Aung James Betker James Crooks James Lennon Jamie Kiros Jan Leike Jane Park Jason Kwon Jason Phang Jason Teplitz Jason Wei Jason Wolfe Jay Chen Jeff Harris Jenia Varavva Jessica Gan Lee Jessica Shieh Ji Lin Jiahui Yu Jiayi Weng Jie Tang Jieqi Yu Joanne Jang Joaquin Quinonero Candela Joe Beutler Joe Landers Joel Parish Johannes Heidecke John Schulman Jonathan Lachman Jonathan McKay Jonathan Uesato Jonathan Ward Jong Wook Kim Joost Huizinga Jordan Sitkin Jos Kraaijeveld Josh Gross Josh Kaplan Josh Snyder Joshua Achiam Joy Jiao Joyce Lee Juntang Zhuang Justyn Harriman Kai Fricke Kai Hayashi Karan Singhal Katy Shi Kevin Karthik Kayla Wood Kendra Rimbach Kenny Hsu Kenny Nguyen Keren Gu-Lemberg Kevin Button Kevin Liu Kiel Howe Krithika Muthukumar Kyle Luther Lama Ahmad Larry Kai Lauren Itow Lauren Workman Leher Pathak Leo Chen Li Jing Lia Guy Liam Fedus Liang Zhou Lien Mamitsuka Lilian Weng Lindsay McCallum Lindsey Held Long Ouyang Louis Feuvrier Lu Zhang Lukas Kondraciuk Lukasz Kaiser Luke Hewitt Luke Metz Lyric Doshi Mada Aflak Maddie Simens Madelaine Boyd Madeleine Thompson Marat Dukhan Mark Chen Mark Gray Mark Hudnall Marvin Zhang Marwan Aljubeh Mateusz Litwin Matthew Zeng Max Johnson Maya Shetty Mayank Gupta Meghan Shah Mehmet Yatbaz Meng Jia Yang Mengchao Zhong Mia Glaese Mianna Chen Michael Janner Michael Lampe Michael Petrov Michael Wu Michele Wang Michelle Fradin Michelle Pokrass Miguel Castro Miguel Oom Temudo de Castro Mikhail Pavlov Miles" + ], + "bbox": [ + 171, + 90, + 825, + 911 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 506, + 946 + ], + 
"page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Brundage, Miles Wang, Minal Khan, Mira Murati, Mo Bavarian, Molly Lin, Murat Yesildal, Nacho Soto, Natalia Gimelshein, Natalie Cone, Natalie Staudacher, Natalie Summers, Natan LaFontaine, Neil Chowdhury, Nick Ryder, Nick Stathas, Nick Turley, Nik Tezak, Nik Felix, Nithanth Kudige, Nitish Keskar, Noah Deutsch, Noel Bundick, Nora Puckett, Ofir Nachum, Ola Okelola, Oleg Boiko, Oleg Murk, Oliver Jaffe, Olivia Watkins, Olivier Godement, Owen Campbell-Moore, Patrick Chao, Paul McMillan, Pavel Belov, Peng Su, Peter Bak, Peter Bakkum, Peter Deng, Peter Dolan, Peter Hoeschele, Peter Welinder, Phil Tillet, Philip Pronin, Philippe Tillet, Prafulla Dhariwal, Qiming Yuan, Rachel Dias, Rachel Lim, Rahul Arora, Rajan Troll, Randall Lin, Rapha Gontijo Lopes, Raul Puri, Reah Miyara, Reimar Leike, Renaud Gaubert, Reza Zamani, Ricky Wang, Rob Donnelly, Rob Honsby, Rocky Smith, Rohan Sahai, Rohit Ramchandani, Romain Huet, Rory Carmichael, Rowan Zellers, Roy Chen, Ruby Chen, Ruslan Nigmatullin, Ryan Cheu, Saachi Jain, Sam Altman, Sam Schoenholz, Sam Toizer, Samuel Miserendino, Sandhini Agarwal, Sara Culver, Scott Ethersmith, Scott Gray, Sean Grove, Sean Metzger, Shamez Hermani, Shantanu Jain, Shengjia Zhao, Sherwin Wu, Shino Jomoto, Shirong Wu, Shuaiqi, Xia, Sonia Phene, Spencer Papay, Srinivas Narayanan, Steve Coffey, Steve Lee, Stewart Hall, Suchir Balaji Tal Broda Tal Stramer, Tao Xu, Tarun Gogineni, Taya Christianson, Ted Sanders, Tejal Patwardhan, Thomas Cunninghamman, Thomas Degry, Thomas Dimson, Thomas Raoux, Thomas Shadwell, Tianhao Zheng Todd Underwood,Todor Markov,Toki Sherbakov,Tom Rubin Tom Stasi Tomer Kaftan. Tristan Heywood,Troy Peterson,Tyce Walters,Tyna Eloundou,V Valerie Qi,Veit Moeller,Vinnie Monaco,Vishal Kuo,Vlad Fomenko,Wayne ChangWeiyi ZhengWenda ZhouWesam Manassra Will Sheu Wojciech Zaremba,Yash Patil Yilei Qian Yongjik Kim Youlong ChengYu Zhang. 
Yuchen He,Yuchen Zhang,Yujia Jin,Yunxing Dai,and Yury Malkov.Gpt-4o system card2024. URL https://arxiv.org/abs/2410.21276.", + "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Pierre Isabelle, Eugene Charniak, and Dekang Lin, editors, Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA, July 2002. Association for Computational Linguistics. doi: 10.3115/1073083.1073135. URL https://aclanthology.org/P02-1040/.", + "Edoardo Maria Ponti, Goran Glavaš, Olga Majewska, Qianchu Liu, Ivan Vulić, and Anna Korhonen. XCOPA: A multilingual dataset for causal commonsense reasoning. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2362-2376, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.185. URL https://aclanthology.org/2020.emnlp-main.185/.", + "Maja Popovic. chrF: character n-gram F-score for automatic MT evaluation. In Ondrej Bojar, Rajan Chatterjee, Christian Federmann, Barry Haddow, Chris Hokamp, Matthias Huck, Varvara Logacheva, and Pavel Pecina, editors, Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395, Lisbon, Portugal, September 2015. Association for Computational Linguistics. doi: 10.18653/v1/W15-3049. URL https://aclanthology.org/W15-3049/.", + "Jirui Qi, Raquel Fernández, and Arianna Bisazza. Cross-lingual consistency of factual knowledge in multilingual language models. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10650-10666, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.658. 
URL https://aclanthology.org/2023.emnlp-main.658/.", + "Qwen,.; An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115.", + "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Jian Su, Kevin Duh, and Xavier Carreras, editors, Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392, Austin, Texas, November 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264/." + ], + "bbox": [ + 173, + 90, + 826, + 911 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Angelika Romanou, Negar Foroutan, Anna Sotnikova, Zeming Chen, Sree Harsha Nelaturu, Shivalika Singh, Rishabh Maheshwary, Micol Altomare, Mohamed A. Haggag, Snegha A, Alfonso Amayuelas, Azril Hafizi Amirudin, Viraat Aryabumi, Danylo Boiko, Michael Chang, Jenny Chim, Gal Cohen, Aditya Kumar Dalmia, Abraham Diress, Sharad Duwal, Daniil Dzenhaliou, Daniel Fernando Erazo Florez, Fabian Farestam, Joseph Marvin Imperial, Shayekh Bin Islam, Perttu Isotalo, Maral Jabbarishiviari, Borje F. 
Karlsson, Eldar Khalilov, Christopher Klamm, Fajri Koto, Dominik Krzeminski, Gabriel Adriano de Melo, Syrielle Montariol, Yiyang Nan, Joel Niklaus, Jekaterina Novikova, Johan Samir Obando Ceron, Debjit Paul, Esther Ploeger, Jebish Purbey, Swati Rajwal, Selvan Sunitha Ravi, Sara Rydell, Roshan Santhosh, Drishti Sharma, Marjana Prifti Skenduli, Arshia Soltani Moakhar, Bardia Soltani Moakhar, Ran Tamir, Ayush Kumar Tarun, Azmine Toushik Wasi, Thenuka Ovin Weerasinghe, Serhan Yilmaz, Mike Zhang, Imanol Schlag, Marzieh Fadaee, Sara Hooker, and Antoine Bosselut. INCLUDE: evaluating multilingual language understanding with regional knowledge, 2024. URL https://doi.org/10.48550/arXiv.2411.19799.", + "Eduardo Sánchez, Belen Alastruey, Christophe Ropers, Pontus Stenetorp, Mikel Artetxe, and Marta R. Costa-jussà. Linguini: A benchmark for language-agnostic linguistic reasoning. CoRR, abs/2409.12126, 2024. doi: 10.48550/ARXIV.2409.12126. URL https://doi.org/10.48550/arXiv.2409.12126.", + "Priyanka Sen, Alham Fikri Aji, and Amir Saffari. Mintaka: A complex, natural, and multilingual dataset for end-to-end question answering. In Nicoletta Calzolari, Chu-Ren Huang, Hansaem Kim, James Pustejovsky, Leo Wanner, Key-Sun Choi, Pum-Mo Ryu, Hsin-Hsi Chen, Lucia Donatelli, Heng Ji, Sadao Kurohashi, Patrizia Paggio, Nianwen Xue, Seokhwan Kim, Younggyun Hahm, Zhong He, Tony Kyungil Lee, Enrico Santus, Francis Bond, and Seung-Hoon Na, editors, Proceedings of the 29th International Conference on Computational Linguistics, pages 1604-1619, Gyeongju, Republic of Korea, October 2022. International Committee on Computational Linguistics. URL https://aclanthology.org/2022.coling-1.138/.", + "Sheikh Shafayat, H Hasan, Minhajur Mahim, Rifki Putri, James Thorne, and Alice Oh. BEnQA: A question answering benchmark for Bengali and English. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1158-1177, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10. 18653/v1/2024-findings-acl.68. URL https://aclanthology.org/2024-findings-acl.68/.", + "Freda Shi, Mirac Suzgun, Markus Freitag, Xuezhi Wang, Suraj Srivats, Soroush Vosoughi, Hyung Won Chung, Yi Tay, Sebastian Ruder, Denny Zhou, Dipanjan Das, and Jason Wei. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp.", + "Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David Ifeoluwa Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, Enzo Ferrante, Marzieh Fadaee, Beyza Ermis, and Sara Hooker. Global MMLU: understanding and addressing cultural and linguistic biases in multilingual evaluation. CoRR, abs/2412.03304, 2024. doi: 10.48550/ARXIV.2412.03304. URL https://doi.org/10.48550/arXiv.2412.03304.", + "Mistral AI team. Cheaper, better, faster, stronger, 2024. URL https://mistral.ai/news/mixtral-8x22b. Accessed: 4-Apr-2025.", + "Aman Singh Thakur, Kartik Choudhary, Venkat Srinik Ramayapally, Sankaran Vaidyanathan, and Dieuwke Hupkes. Judging the judges: Evaluating alignment and vulnerabilities in Ilms-as-judges. CoRR, abs/2406.12624, 2024. URL https://doi.org/10.48550/arXiv.2406.12624.", + "Lucas Weber, Elia Bruni, and Dieuwke Hupkes. Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning. 
In Jing Jiang, David Reitter, and Shumin Deng, editors, Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pages 294-313, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.conll-1.20. URL https://aclanthology.org/2023.conll-1.20/." + ], + "bbox": [ + 171, + 90, + 826, + 911 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Weihao Xuan, Rui Yang, Heli Qi, Qingcheng Zeng, Yunze Xiao, Yun Xing, Junjue Wang, Huitao Li, Xin Li, Kunyu Yu, Nan Liu, Qingyu Chen, Douglas Teodoro, Edison Marrese-Taylor, Shijian Lu, Yusuke Iwasawa, Yutaka Matsuo, and Irene Li. Mmlu-prox: A multilingual benchmark for advanced large language model evaluation. CoRR, abs/2503.10497, 2025. URL https://doi.org/10.48550/arXiv.2503.10497.", + "Wenxuan Zhang, Mahani Aljunied, Chang Gao, Yew Ken Chia, and Lidong Bing. M3exam: A multilingual, multimodal, multilevel benchmark for examining large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine, editors, Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/117c5c8622b0d539f74f6d1fb082a2e9-Abstract-Datasets_and_Benchmarks.html.", + "Yuan Zhang, Jason Baldridge, and Luheng He. PAWS: Paraphrase adversaries from word scrambling. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1298-1308, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1131. 
URL https://aclanthology.org/N19-1131/." + ], + "bbox": [ + 173, + 90, + 826, + 359 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "A Additional dataset statistics", + "text_level": 1, + "bbox": [ + 171, + 89, + 441, + 104 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "For reference, we provide a few dataset statistics beyond the main results in the paper.", + "bbox": [ + 171, + 121, + 736, + 137 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/0ee44851e6330b28bb51edddb359d3b46ffd35f998c59f2c4c70256478702034.jpg", + "image_caption": [ + "Figure 8: Distribution of output types on the dev split. We show the normalised distribution of correct output types across languages, ordered (from bottom to top) by average frequency. Rare output types that occur only a few times are mapped to the category 'other'." + ], + "image_footnote": [], + "bbox": [ + 207, + 146, + 785, + 316 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Output type distribution In Figure 8, we show the per-language distribution of output types for MultiLoKo dev split.10 We mapped very rare output types, such as 'a quantity', 'a period of time' or 'letter' to 'other', for plotting purposes. We can see that name is the most common output type across languages, followed by the generic output type a word and number. Also place and date are relatively common output types, whereas most other output types occur very infrequently or only for a handful of languages.", + "bbox": [ + 169, + 386, + 823, + 469 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/bd414948b1fc64eac369d890930e1fd6fdc455be1d18c2df3bdd0cd54887fd29.jpg", + "image_caption": [ + "Figure 9: Average question and answer lengths. We show the per-question average length (in words) of the locally-sourced questions and answers, human-translated into English." 
+ ], + "image_footnote": [], + "bbox": [ + 222, + 486, + 772, + 772 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Input and output length In addition to that, we show the average question – and output lengths of human-translated the locally sourced questions to English in Figure 9. While there is some variation in particular in question length, the lengths of the answers are relatively consistent. The average answer length is around 2, combining one-word answers with (usually) longer names.", + "bbox": [ + 169, + 827, + 823, + 883 + ], + "page_idx": 21 + }, + { + "type": "page_footnote", + "text": "10Because the test split is blind, we do not report the distribution of output types here.", + "bbox": [ + 187, + 896, + 692, + 911 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "B Instruction following", + "text_level": 1, + "bbox": [ + 171, + 89, + 385, + 107 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To facilitate evaluation, we instruct models to answer question with only a number/place/etc. Overall, we found that base models (with a five-shot template) are much better at abiding by this instruction than chat models, which exhibit a number of pathologies. While some of those can be caught with appropriate post-processing (see Appendix C, this is not the case for all issues. Below, we provide a summary of the main instruction-following issues we encountered with chat models.", + "bbox": [ + 169, + 121, + 826, + 191 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "False refusals Sometimes chat models refuse to provide an answer when the question is falsely perceived to be inappropriate (e.g. 
when the question asks about someone aged younger than 18).", + "bbox": [ + 169, + 200, + 823, + 231 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Producing full sentences Another issue we observed is that chat models would provide a full sentence answer, rather than a single word or phrase (e.g. Which year was Francisco Franco born? Produce a year only. - Francisco Franco was born in 1936). Such full-sentence answers make exact match rating impossible. The effect is not consistent across languages and happens only for some of the examples, without any discernable pattern, and therefore difficult to address completely with post-processing.[11]", + "bbox": [ + 169, + 239, + 826, + 324 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Spurious addition of \"answer is\" Likely due to overtraining on MMLU style tasks, Models such as OpenAI's GPT4 and Gemini 2.0 preface the vast majority of the answers in English with \"answer is\" or \"X answer is X\" where X is the desired correct response. This is remarkable, because it is essentially a repetition of the end of the prompt. However, it is easy to fix in post-processing.", + "bbox": [ + 169, + 333, + 825, + 390 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Japanese specific issues In Japanese, in general it is not polite to answer with incomplete sentences. As such chat models often append the copula verb \"desu\" to the answer, making exact match unsuccessful. We are able to fix this in postprocessing.", + "bbox": [ + 169, + 398, + 826, + 443 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Claude 3.5 Sonnet issues We were unable to make Claude 3.5 Sonnet follow the instructions to produce just an answer in English. It seemed to engage in a long chain-of-thought reasoning style response which we were unable to reliably parse. This issue only manifests in English and only with Claude. 
For this reason, we exclude Claude 3.5 Sonnet from our knowledge transfer results, as it would make the average lack of knowledge transfer from non-English languages to English more severe than they are.", + "bbox": [ + 169, + 450, + 825, + 536 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C Post-processing details", + "text_level": 1, + "bbox": [ + 171, + 556, + 401, + 574 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We perform the following post-processing for both the reference answers and the answers produced by the model:", + "bbox": [ + 169, + 589, + 823, + 618 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Remove leading and trailing whitespaces.", + "- Remove punctuation.", + "- Lowercase everything." + ], + "bbox": [ + 215, + 630, + 506, + 686 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We perform the following additional post-processing for pretrained models:", + "bbox": [ + 171, + 699, + 669, + 714 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Remove leading \"Answer:\" or \"A:\" or the non-English equivalent from the output.", + "- Remove everything after the first newline." + ], + "bbox": [ + 215, + 726, + 769, + 761 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We perform the following additional post-processing for postrained models:", + "bbox": [ + 171, + 773, + 671, + 789 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Remove leading \"answer is:\"", + "- Detect the pattern \"X answer is X\", where X is the desired answer, and strip the unnecessary part in the middle.", + "- Remove training \"desu\" in Japanese." + ], + "bbox": [ + 215, + 801, + 823, + 873 + ], + "page_idx": 22 + }, + { + "type": "page_footnote", + "text": "11Using a judge-LLM may to some extent address this problem, but at the expense of other issues (e.g. 
Thakur et al., 2024).", + "bbox": [ + 169, + 883, + 823, + 911 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D Annotation instructions", + "text_level": 1, + "bbox": [ + 171, + 89, + 408, + 104 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Our annotation pipeline contains five stages: 1) locality rating, 2) question generation 3) question review, 4) question answering, and 5) translation. Below, we provide the annotation instructions for each of these stages.", + "bbox": [ + 169, + 121, + 826, + 165 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "D.1 Locality rating", + "text_level": 1, + "bbox": [ + 171, + 181, + 320, + 196 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "To narrow-down the initial selection of paragraphs – sampled from the top-rated Wikipedia pages of the respective locales – the first step in our annotation pipeline is locality rating. Given a paragraph, we ask annotators to rate whether the paragraph is locally relevant to the particular locale, on a likertscale from 1 to 5, where 1 refers to extremely local and relatively obscure topics very specifically related to the specific language or locale and with little international recognition and 5 to globally well-known topics. We also ask annotators to disregard pages about inappropriate or politically sensitive topics. The rubric for locality annotation can be found in Table 3. We disregard everything with a locality rating of 3 or lower.", + "bbox": [ + 169, + 207, + 826, + 319 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/fa2dfae75eef0a7bb838f0dfc25bd66e8d7acffe982633b0c359ac0cb43632f3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DescriptionExample
1.Extremely local and relatively obscure. Content that is of interest only to a small, localized group, such as a specific town, region, or community. These topics are typically obscure and not widely known beyond their immediate area.Local radio stations, small town historical events, regional businesses, or niche local cultural practices.
2.Regional interest. Topics that have some relevance beyond a specific locality but are still primarily of interest within a particular region or country.State or provincial politicians, regional cuisine, local sports teams, or medium-sized companies with regional influence.
3.National Significance. Content that is widely recognized within a single country, but relatively un-known internationally.National politicians (not internationally known), popular national media figures, major corporations within a country, or significant national historical events.
4.International recognition. Topics that are recognized and have relevance in multiple countries but may not be universally known across the globe. These topics often have international influence and are likely to be covered in international media, though their impact may vary by region.International brands which may be recognized in more than one country, celebrities with some international reach, significant cultural movements, or political conflicts with some awareness on the international stage.
5.Global prominence. Content that is widely recognized and relevant across a large number of countries around the world. These topics have a global impact or appeal and are likely to be well-represented in media across diverse cultures and regions.Globally famous celebrities (e.g., Cristiano Ronaldo), multinational corporations (e.g., Apple), major world events, or universally recognized cultural icons.
", + "bbox": [ + 173, + 333, + 823, + 747 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Table 3: Rubric for locality rating task. In the locality rating task, we ask the annotators to rate paragraphs with respect to how locally relevant the topic is to the locale.", + "bbox": [ + 169, + 750, + 823, + 779 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "D.2 Question generation", + "text_level": 1, + "bbox": [ + 171, + 816, + 357, + 830 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "The second and main annotation step in our pipeline is the step in which we ask annotators to generate questions about sampled paragraphs. We ask annotators to generate a challenging question with a short answer. The answer should be easy to evaluate with string-matching metrics, the questions should not be open-ended or have many possible correct answers, be ambiguous or subjective, and the expected short answer should be concise. To ensure difficulty, we ask that answering the question", + "bbox": [ + 169, + 842, + 825, + 912 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "requires combining information from different parts in the accompanying text; It should not be answerable by mere regurgitation of a single sentence. We furthermore ask that the question is formulated such that its answer will not change over time (e.g. not 'How many medals has Sifan Hassan won', but 'How many medals has Sifan Hassan won between 2018 and 2022 (including)'), and that the question is answerable also without the article (e.g. not 'How many tv shows did the person in this article produce?'). To facilitate validation checks in the next round, we also ask that the question authors write a longer answer to explain how they arrived at the short answer. We also ask the question authors to annotate what is the type of the correct answer (e.g. 
number, name, date, etc) In the pilot, we observed that – for some languages – the vast majority of questions were questions that required some form of numerical reasoning. Because the intention of the benchmark is to address knowledge more than reasoning, we afterwards restricted the number of numerical questions to $10\\%$ . Similarly, we asked question authors to avoid yes/no questions.", + "bbox": [ + 169, + 90, + 826, + 257 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "D.3 Question review", + "text_level": 1, + "bbox": [ + 171, + 273, + 330, + 287 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In the first round of question review, we asked annotators from a different provider to judge whether the questions abide by the rules provided to the question authors. All question reviewers are native speakers. Specifically, we ask them to check if:", + "bbox": [ + 169, + 297, + 823, + 340 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The question pertains to a locally relevant topic", + "- The question is clear and understandable, and not subjective", + "- The question has a clear and concise answer", + "- If there are multiple possible variations of the answer possible (e.g. 'Dick Schoof' / 'Minister Dick Schoof' / 'Prime Minister Dick Schoof' / etc), all versions of the answer are provided.", + "- The question and answer are in the correct language", + "- The question is understandable without the article", + "- That the answer to the question will not likely change in the near future" + ], + "bbox": [ + 215, + 351, + 823, + 489 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "When a question can be fixed with a minor change (e.g. add a time indication to make sure an answer will not change in the near future, or add an extra answer version), we ask the question reviewers to implement this fix and describe it. 
In the pilot round, we use the annotator feedback to finetune our annotation protocol and provide feedback to the question-authors. During the rest of the data collection, we simply disregard questions that are not useable as is or can be corrected with minor changes.", + "bbox": [ + 169, + 501, + 823, + 584 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "D.4 Validation through question answering", + "text_level": 1, + "bbox": [ + 171, + 599, + 486, + 616 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In the last stage of our question generation pipeline, we have additional annotators answer the sourced and reviewed question. The goal of this validation task is to confirm that the questions are answerable, correct, non-ambiguous when read by individuals other than the original question author, and that all possible versions of the answers are included. For each question, we ask two additional annotators to first answer the question, using the snippets the questions were sourced from for context. After they have answered the question, they are shown the list of reference answers written by the original author of the question as well as the rational they provided, and we ask them to reflect upon the answer they gave themselves. If their answer did not match any answer in the original reference list, we ask them to either add their answer to the list if it is semantically equivalent to their own answer or indicate which answer they believe to be correct, their own or the original answer. 
We disregard all questions where at least one annotator disagrees with the original question author.", + "bbox": [ + 169, + 626, + 826, + 779 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E Related work", + "text_level": 1, + "bbox": [ + 171, + 797, + 321, + 813 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In this paper, we introduce a new multilingual benchmark for LLMs, that we believe addresses gaps and pitfalls in existing benchmarks. We (concisely) outlined those gaps and pitfalls and mentioned several other works related to ours in the introduction of those paper. Here, we discuss multilingual evaluation of LLMs in more detail. Specifically, we discuss what datasets recent LLM releases have used for multilingual evaluation (Appendix E.1) and what other datasets and approaches they could have used but did not (Appendix E.2).", + "bbox": [ + 169, + 828, + 823, + 912 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/6097bbba21f178dc4be15a6d96ad58e0b1c3c7cc17e299a76a31147c2d105aca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Claude 3.5 SonnetMGSM (Shi et al., 2023)
Gemini 2.0 FlashMentions multilingual audio, no multilingual benchmarks scores reported.
GPT4-oARC-Easy and TruthfulQA translated into five African languages (internal benchmark), Uhura-Eval (internal benchmark).
Llama 3.1MGSM (Shi et al., 2023), Multilingual MMLU (internal benchmark)
Mixtral 8x22Btranslated ARC-C, HellaSwag and MMLU (internal benchmarks)
Qwen2.5 72BM3Exam (Zhang et al., 2023), IndoMMLU (Koto et al., 2023), ruMMLU (Fenogenova et al., 2024), translated MMLU (Chen et al., 2023), Belebele (Bandarkar et al., 2024), XCOPA (Ponti et al., 2020), XWinograd (Muennighoff et al., 2023), XStoryClose (Lin et al., 2022), PAWS-X (Zhang et al., 2019), MGSM (Shi et al., 2023), Flores-101 (Goyal et al., 2022)
", + "bbox": [ + 176, + 88, + 823, + 275 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 4: Multilingual evaluation of recent LLM releases, overview. We provide an overview table of the benchmark for which scores are reported in the release papers or notes of the LLMs we evaluated in this paper. Models are sorted alphabetically.", + "bbox": [ + 169, + 279, + 823, + 324 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "E.1 Multilingual evaluation of LLMs in practice", + "text_level": 1, + "bbox": [ + 171, + 358, + 522, + 373 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "While multilinguality is something frequently mentioned in the release papers or posts of recent LLM releases, the datasets for which they actual report scores is in most cases quite limited. Of the models that we evaluated for this paper, Gemini 2.0 Flash reported no multilingual scores at all; GPT4-o and Mixtral 8x22B report scores only on internally translated but not publicly available English benchmarks; Claude 3.5 Sonnet reports scores for only one benchmark - MGSM. MGSM is also the only publicly available benchmark for which Llama 3.1 reports scores, along with - also - an internally translated version of MMLU that is not publicly available. The only model that extensively reports multilingual benchmark values, on more than 10 benchmarks, is Qwen2.5 72B. We provide an overview of the multilingual benchmarks for which scores are reported for these models in Table 4.", + "bbox": [ + 169, + 383, + 823, + 508 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "E.2 Multilingual evaluation options for LLMs", + "text_level": 1, + "bbox": [ + 171, + 523, + 504, + 539 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "While, as we discuss below, there are gaps and challenges with multilingual evaluation for LLMs, there are in fact many more options than is suggested by what is reported in recent releases. 
Below, we discuss other options for multilingual LLM evaluation.", + "bbox": [ + 169, + 549, + 826, + 592 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Translated English benchmarks As mentioned earlier on, benchmarks used for LLM evaluation are often translated English benchmarks. In some cases, the benchmarks were designed to evaluate only English and translated later, such as translated MMLU (e.g. Li et al., 2024; Chen et al., 2023; OpenAI, 2025; Singh et al., 2024) or MMLU-ProX (Xuan et al., 2025), MGSM (Shi et al., 2023) or MLAMA (Kassner et al., 2021). In other cases, the benchmark was multilingual at the time of its creation, but means of creation of the non-English data was through translating English sourced data, such as Belebele Bandarkar et al. (2024), Mintaka (Sen et al., 2022), or X-FACTR (Jiang et al., 2020). Taken together, translated benchmarks span quite a range of tasks, such as question answering (Artetxe et al., 2020; Lewis et al., 2020; Qi et al., 2023; Ohmer et al., 2023), natural language inference (Conneau et al., 2018), paraphrase detection (Zhang et al., 2019), general linguistic competence (Jumelet et al., 2025), reading comprehension (Artetxe et al., 2020; Bandarkar et al., 2024) and commonsense reasoning (Ponti et al., 2020), and even instruction following (He et al., 2024). With the exception of question answering and of course instruction following, however, many of these tasks have gone (somewhat) out of fashion for LLM evaluation, a trend which is mirrored also in the usage of their multilingual counterparts. As mentioned before, translated benchmarks have the advantage of containing parallel data, allowing for some form of comparability across languages, but are English-centric in content and may suffer from translationese (see e.g. 
Romanou et al., 2024; Chen et al., 2024, for a recent discussion of this).", + "bbox": [ + 169, + 604, + 826, + 854 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Multilingual benchmarks sourced from scratch Though much rarer, there are also benchmarks that are created independently for each language they include. Clark et al. (2020) release a question answering dataset separately sourced for 11 different languages, with a protocol relatively similar", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "to ours. In a different category, Hardalov et al. (2020), Zhang et al. (2023) and Romanou et al. (2024) and Sánchez et al. (2024) do not create benchmark data, but instead collect existing exam or competition questions from official human exams. In case of Zhang et al. (2023), the exams are graduation exams of primary, middle and high school; Hardalov et al. (2020) includes official state exams taken by graduating high school students, which may contain parallel pairs in case countries allow examinations to be taken in multiple languages; Romanou et al. (2024), cover academic exams at middle and high school and university level, professional certifications and licenses, and exams to obtain regional licenses. Sánchez et al. (2024) instead focus on questions from the International Linguistic Olympiad corpus. Lastly, as part of their study Ohmer et al. 
(2023) create a dataset called SIMPLE FACTS, containing factual questions created through a shared template filled in with language specific factual data.", + "bbox": [ + 169, + 90, + 826, + 243 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Consistency evaluation A rather different approach to assess multilinguality in LLMs is to focus not on accuracy across different languages, but to consider whether predictions are consistent across languages. This tests knowledge and skill transfer between languages more explicitly. Two recent examples of studies incorporating consistency-based evaluations on factual knowledge questions are Qi et al. (2023) and Ohmer et al. (2023). Qi et al. (2023) focuseses specifically on sample-level consistency of answers across different languages, requiring existing parallel benchmarks. Ohmer et al. (2023), instead, ask models to translate benchmark questions themselves before answering them again. This can, with some caveats, be applied to any existing monolingual benchmark, but – requiring multiple steps – it is more involved as a paradigm, and is somewhat bottlenecked by the translation ability of the model to be evaluated.", + "bbox": [ + 169, + 258, + 826, + 397 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Translation as a proxy for multilinguality Another, more implicit method to assess multilinguality in LLMs is to evaluate their ability to translate from one language to another. This approach was famously used by Brown et al. (2020), but has not been common since.", + "bbox": [ + 169, + 411, + 823, + 454 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Monolingual non-English evaluation In our discussion, we have focussed on multilingual evaluation options that cover multiple other languages. After all, a benchmark to evaluate models on Bengali (e.g. Shafayat et al., 2024) or Arabic (e.g. 
Alwajih et al., 2024) can contribute to multilingual evaluation when combined with other benchmarks, but does not so on its own. Because such benchmarks are usually created by language experts for the respective languages, they usually target locally relevant skills and knowledge and are likely of higher quality than benchmarks created for many languages simultaneously (either through translation or from scratch). Yet, composing a suite including many languages that allows direct comparisons between languages remains challenging. We believe such benchmarks can be important for multilingual evaluation in LLMs, but will not further discuss benchmarks focussing on individual languages or very small sets of languages within one family here.", + "bbox": [ + 169, + 468, + 826, + 621 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_model.json b/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5f1683abc24e9573d6fd2423d601b160bb68ff3e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_model.json @@ -0,0 +1,3444 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.265, + 0.061, + 0.708 + ], + "angle": 270, + "content": "arXiv:2504.10356v2 [cs.CL] 15 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.228, + 0.123, + 0.771, + 0.175 + ], + "angle": 0, + "content": "MultiLoKo: a multilingual local knowledge benchmark for LLMs spanning 31 languages" + }, + { + "type": "text", + "bbox": [ + 0.359, + 0.226, + 0.638, + 0.241 + ], + "angle": 0, + "content": "Dieuwke Hupkes* Nikolay Bogoychev*" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.242, + 0.518, + 0.254 + ], + "angle": 0, + "content": "Meta" + }, + { + "type": "text", + 
"bbox": [ + 0.365, + 0.255, + 0.635, + 0.269 + ], + "angle": 0, + "content": "{dieuwkehupkes,nbogoych}@meta.com" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.305, + 0.538, + 0.32 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.335, + 0.768, + 0.64 + ], + "angle": 0, + "content": "We present MultiLoKo, a new benchmark for evaluating multilinguality in LLMs covering 31 languages. MultiLoKo consists of three partitions: a main partition consisting of 500 questions per language, separately sourced to be locally relevant to the specific language, and two translated partitions, containing human-authored translations from 30 non-English languages to English and vice versa. For comparison, we also release corresponding machine-authored translations. The data is equally distributed over two splits: a dev split and a blind, out-of-distribution test split. MultiLoKo can be used to study a variety of questions regarding the multilinguality of LLMs as well as meta-questions about multilingual benchmark creation. We compute MultiLoKo scores for 11 base and chat models marketed to be multilingual and study their average performance, their performance parity across languages, how much their ability to answer questions depends on the question language, and which languages are most difficult. None of the models we studied performs well on MultiLoKo, as indicated by low average scores as well as large differences between the best and worst scoring languages. Furthermore, we find a substantial effect of the question language, indicating suboptimal knowledge transfer between languages. Lastly, we find that using local vs English-translated data can result in differences more than 20 points for the best performing models, drastically change the estimated difficulty of some languages. 
For using machines instead of human translations, we find a weaker effect on ordering of language difficulty, a larger difference in model rankings, and a substantial drop in estimated performance for all models." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.663, + 0.312, + 0.679 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.693, + 0.825, + 0.806 + ], + "angle": 0, + "content": "With the growing presence and deployment of LLMs across the world, evaluating their abilities in languages other than English becomes more and more eminent. Yet, studying and evaluating multilinguality in LLMs remains a challenging enterprise, and it is hardly exaggerated to call the current state of multilingual evaluation in LLMs insufficient. Older multilingual benchmarks such as PAWS-X (Zhang et al., 2019), XNLI (Conneau et al., 2018) or XCOPA (Ponti et al., 2020) often do not fit the demands for evaluating auto-regressive models and are rarely used to evaluate recent models. Furthermore, their coverage of languages is relatively small compared to the number of languages in which LLMs are intended to be proficient." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.807, + 0.827, + 0.892 + ], + "angle": 0, + "content": "More often used for LLM evaluation are benchmarks translated from English, such as MGSM (translated GSM8K, Shi et al., 2023), MMMLU (tranlated MMLU, OpenAI, 2025) or (less frequently) Belebele (Bandarkar et al., 2024). These benchmarks provide good coverage over many languages, but using translated data comes with its own set of issues. One such issue is that even when human-rather than machine-authored translations are used, translated data is known to differ from native text in several ways (Clark et al., 2020). 
Furthermore, using translated benchmarks imposes a strong" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.899, + 0.319, + 0.913 + ], + "angle": 0, + "content": "*Equal contributions" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "English-centric bias: translated data may be multilingual on the surface, it is not in its content. The benchmarks MLQA (Lewis et al., 2020) and TidyQA (Clark et al., 2020) to some extent address the issue by sourcing data separately for different languages. Even in their sourcing protocols, however, there is no explicit focus on selecting locally relevant content for the chosen languages. In addition to that, their coverage is again small compared to the above mentioned translated benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.164, + 0.827, + 0.207 + ], + "angle": 0, + "content": "In response to these issues, we introduce a wide-coverage multilingual benchmark with locally-sourced questions for 31 different languages. Because the benchmark targets multilingual local knowledge, we dub it MultiLoKo. The release of MultiLoKo serves two interconnected goals:" + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.209, + 0.625, + 0.222 + ], + "angle": 0, + "content": "1) Provide a better means to evaluate multilinguality in LLMs;" + }, + { + "type": "text", + "bbox": [ + 0.208, + 0.225, + 0.782, + 0.239 + ], + "angle": 0, + "content": "2) Provide data to study the effect of various design choices in multilingual evaluation." + }, + { + "type": "list", + "bbox": [ + 0.208, + 0.209, + 0.782, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.242, + 0.827, + 0.381 + ], + "angle": 0, + "content": "To address our first goal, we create 500 questions per language, written from scratch for each language, using a sourcing protocol specifically designed to ensure local relevance of the question topics. 
To also reap the benefits of parallel data, we commissioned both human and machine-authored translations for all non-English questions into English and vice versa, providing a total of 15500 parallel questions, sourced across the 31 languages in the benchmark. The translated data facilitates the study of transfer between languages and also serves our second goal. By comparing the English-translated data with the locally sourced data, we can explicitly compare the adequacy of using translated benchmarks; by comparing human- with machine-authored translations, we can better estimate the potential issues of the latter. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while test scores can only be obtained through an external provider.3" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.384, + 0.825, + 0.467 + ], + "angle": 0, + "content": "We provide elaborate analyses for both our goals. We compute average performance and language parity scores on the locally sourced data for 11 models marketed for their multilinguality (§ 5.1); we investigate whether these models exhibit knowledge transfer between different languages (§ 5.2); we study the impact of local sourcing versus translating on model rankings and language difficulty (§ 5.4.1); we analyse the difficulty of the included languages through various lenses (§ 5.3); and we conduct an analysis into the difference between human- and machine-authored translation (§ 5.4.3)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.469, + 0.825, + 0.567 + ], + "angle": 0, + "content": "We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language. Llama 3.1 405B and GPT4-o are close contenders in terms of average scores (34.3 and 34.0, respectively), but both have substantially higher language gaps (39 and 49 points). 
Almost across the board, model performances are better when questions are asked in the language to which the content is relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low response-consistency across question language." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.569, + 0.827, + 0.735 + ], + "angle": 0, + "content": "Next, we study the relevance of using locally sourced data as opposed to translated English data as well as whether it matters if translations are authored by humans or machines. We find that the estimated difficulty of some languages changes drastically across the two sourcing setups, within the range of 15 points decrease and 8 points increase on average across models. The rank correlation between average language difficulty score is 0.78. Furthermore, individual model scores between locally and English-translated data can differ up to 22 points for some languages. However, changing the sourcing setup does not impact model rankings, suggesting that using translated data may be suitable for comparing models but less for model development or language prioritisation. For using machine- instead of human-authored translations, as well, the effect on model ranking is limited \\((R = 0.97)\\), but the difficulty estimates of various languages changes with up to 12 points. Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to \\(34\\%\\) of the human-translated scores." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.742, + 0.827, + 0.826 + ], + "angle": 0, + "content": "Outline In the remainder of this paper, we first describe our dataset collection protocol and the dataset itself in § 2 and § 3, respectively. In § 4, we describe our experimental setup. In § 5, we present a range of different results, covering (among other things), the summary of results described above. We conclude in § 6. 
As we discussed quite some related work above, we do not include a separate related work section in the main paper, but we do provide a discussion of a wider range of multilingual datasets in Appendix E." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.873 + ], + "angle": 0, + "content": "\\( {}^{2} \\) An exception to this is the benchmark EXAMS (Hardalov et al., 2020),which consists of exams separately sourced for each language. For reasons unknown to the authors of this work, it was never used for any prominent LLM release, with the exception of (Dubey et al., 2024), who deployed it for training rather than evaluation." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.874, + 0.825, + 0.911 + ], + "angle": 0, + "content": "3The MultiLoKo data, five few-shot examples per language, an evaluation script, a set of language-specific prompts, and information about test-score submissions can be found at https://github.com/facebookresearch/multiloko/." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.355, + 0.106 + ], + "angle": 0, + "content": "2 Dataset collection" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.122, + 0.825, + 0.192 + ], + "angle": 0, + "content": "The main data collection protocol of MultiLoKo is similar to the protocol used by the well-known benchmark SQuAD (Rajpurkar et al., 2016): we source articles from Wikipedia and ask annotators to generate questions about paragraphs sampled from these articles. After that, we run several rounds of quality control on the generated questions and commission human- and machine-authored translations of all data. Our collection protocol consists of five steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.2, + 0.827, + 0.298 + ], + "angle": 0, + "content": "Step 1: Paragraph selection The first step in our protocol is the sampling of the 6K most visited Wikipedia pages for each language for the period of 2016-2021. We sample paragraphs from those pages by randomly selecting a word in the page and expanding left and right until we reach 3K characters. Next, we ask annotators to judge the local relevance of the samples on a scale from 1 to 5, where 1 refers to topics specific to the language (e.g. a Swedish singer not known outside of Sweden) and 5 to globally well-known topics (e.g. 'Youtube'). We disregard all topics that have a locality score above 3. The full rubric and annotation instructions can be found in Appendix D.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.307, + 0.825, + 0.392 + ], + "angle": 0, + "content": "Step 2: Question generation In step 2, we ask native speakers to generate challenging questions about the content in the paragraphs. To facilitate automatic scoring, we ask that the questions are closed-form questions, with only one correct short answer. To ensure that the annotation instructions are understandable and appropriate for each locale and the questions of high quality, we run a pilot with 50 questions separately for each language. After our pilot, we commission 500 additional samples for each language, to leave a \\(10\\%\\) margin to disregard questions in the rest of the process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.825, + 0.47 + ], + "angle": 0, + "content": "Step 3: Question review For each generated question, we ask a new set of annotators from a separate provider to judge whether the generated questions abide by the annotation instructions, to flag any possible issues, and to mark if the question is useable as is, would be useable with a small adaptation or should be disregarded. 
We ask annotators to fix small annotators on the spot, and as respective vendors that questions with larger issues are replaced." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.478, + 0.827, + 0.562 + ], + "angle": 0, + "content": "Step 4: Question answering As a last quality control step, we ask two annotators different from the creator of the question to answer the questions. In this stage, we do not ask annotators to correct questions, but we simply disregard all questions for which either annotator thinks the original answer was incorrect, or the annotator provided an answer not matching the original answer because of ambiguities in the question. The only corrections we allow in this stage are additions of additional, semantically equivalent, correct answers (e.g. 'four' as an alternative to '4')." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.827, + 0.681 + ], + "angle": 0, + "content": "Step 5: Translation Lastly, we translate the non-English data back to English and vice versa. This effort serves two purposes. First, it allows to study generalisation of knowledge and skills between English and non-English languages through a direct comparison of the same questions. Second, it facilitates inspection of the topics and questions for all languages of the dataset, without the need to be able to speak all those languages. As automatic translation of benchmarks is relatively common practice in the field (e.g. Li et al., 2024), we commission both human and machine translations and study their difference as part of our analysis. 
For the machine translations, we use Google Translate sentence based cloud API.\\(^{4}\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.702, + 0.398, + 0.718 + ], + "angle": 0, + "content": "3 MultiLoKo the dataset" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.734, + 0.825, + 0.763 + ], + "angle": 0, + "content": "MultiLoKo consists of three main components: i) the collected data; ii) a set of multilingual prompts to prompt base- and chat models; and iii) a set of metrics." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.78, + 0.342, + 0.794 + ], + "angle": 0, + "content": "3.1 The collected data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.617, + 0.822 + ], + "angle": 0, + "content": "The data in MultiLoKo consists of several partitions and two splits." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.827, + 0.886 + ], + "angle": 0, + "content": "Partitions MultiLoKo contains one main partition, containing locally-soured data for 31 languages, including English. In addition to that, it contains four translated partitions. Two of those are human-translated partitions: human-translated-from-english, consisting of human-authored translations of English data into the 30 other languages in MultiLoKo," + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.897, + 0.497, + 0.912 + ], + "angle": 0, + "content": "4https://cloud.google.com/translate?hl \\(\\equiv\\) en" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.827, + 0.204 + ], + "angle": 0, + "content": "human-translated-to-english containing human-authored translations of the non-English subsets into English. 
The other two are machine-translated partitions following the same pattern: machine-translated-from- english, contains machine-authored translations of English data into 30 other languages, and machine-translated-to- english contains machine-authored translations of the non-English subsets into English. All partitions contain 500 samples per language - thus in total 15500 samples in the main partition, and 15000 samples in the translated partitions. Statistics about the dataset such as the distribution over answer types and the average prompt length can be found in Appendix A. Results relating to the difficulty of the benchmark can be found in \\(\\S 5\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.212, + 0.828, + 0.351 + ], + "angle": 0, + "content": "Splits Each partition is divided equally over two splits: a dev split that can be used for development, and a blind test split. Each of these splits thus contains 250 samples per language. Until the test split is publicly released, results can only be obtained through model submissions. The splits are not random, but constructed such that for each language the most frequently visited pages are in the dev split while the least frequently visited pages are in the test split, roughly preserving the distribution of answer types (e.g. number, name, year, etc). The test split can thus be seen as an out-of-distribution (ood) split, specifically meant to assess generalisation (which is challenging in the context of LLMs, see e.g. Hupkes et al., 2023). In § 5.4.2 we provide an analysis of the extent to which the split is truly an ood split, by analysing its difficulty. The results reported in the results section of the paper are dev results." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.368, + 0.436, + 0.383 + ], + "angle": 0, + "content": "3.2 Prompts and few-shot examples" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.393, + 0.825, + 0.477 + ], + "angle": 0, + "content": "Running MultiLoKo requires prompts. In the spirit of getting truly multilingually appropriate results, we design prompts separately for each language and release them along with the data. The prompts are written by different linguistic experts for the various languages, in consultation with the benchmark creators to ensure they are appropriate for LLMs. We provide prompts for base models and chat models that allow for incorporating up to five few-shot examples, which we also provide.6 All prompts and few-shot examples can be found in the MultiLoKo repository." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.494, + 0.267, + 0.508 + ], + "angle": 0, + "content": "3.3 Metrics" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.519, + 0.825, + 0.577 + ], + "angle": 0, + "content": "MultiLoKo has two main metrics and two auxiliary metrics. The two main metrics - Exact Match accuracy (EM) and Gap - capture the overall performance of MultiLoKo and are computed on the main partition, whereas the two auxiliary metrics - Mother Tongue Effect (MTE) and Locality Effect (LE) - combine information from different partitions. We provide a cheat-sheet in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.584, + 0.825, + 0.709 + ], + "angle": 0, + "content": "EM and Gap EM indicates the performance of a model on a single language or averaged across languages, as measured by the percentage of times the model (after post-processing) provides an answer that verbatim matches one of the answers in the reference list. Gap, defined as the difference between the best and the worst performing language in the benchmark, is a measure of parity across the individual languages within the benchmark. 
Taken together, EM and Gap provide a good indication of how well a model is faring on MultiLoKo. Because both gap and EM are binary metrics that may be open to false negatives, we also considered the partial match metrics BLEU (Papineni et al., 2002), ChrF (Popovic, 2015) and contains. We did not find any novel patterns using those metrics, but include them in our implementation for future research." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.717, + 0.827, + 0.844 + ], + "angle": 0, + "content": "MTE Because of the 2x2 design of MultiLoKo, in which we translated non-English data back to English and vice versa, we can compute several metrics related to locality of the requested information. MTE is one of such metrics. It expresses the impact of asking a question in a language to which that question is relevant. We quantify MTE (for non-English languages only), as the difference between the EM score of the locally sourced data asked in the corresponding language (e.g. asking a question about a local Bengali radio station in Bengali) and the EM score when the same questions are asked in English. A positive MTE indicates that information is more readily available when it is relevant to the language in which it was asked, whereas a negative MTE indicates that the information is more easily accessible in English. MTE is a measure related to transfer as well as language proficiency." + }, + { + "type": "text", + "bbox": [ + 0.191, + 0.847, + 0.714, + 0.86 + ], + "angle": 0, + "content": "5More details can be found at https://github.com/facebookresearch/multiloko/." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.86, + 0.827, + 0.913 + ], + "angle": 0, + "content": "6In several recent works it has been shown that prompts can have a substantial impact on model scores (e.g. Weber et al., 2023; Mizrahi et al., 2024). 
Given the large number of languages in the benchmark and the fact that those are not all mastered by the main authors, we did not include a systematic search through prompts, but presented our best-effort results." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.177, + 0.089, + 0.825, + 0.297 + ], + "angle": 0, + "content": "
Average EMThe first main metric we use to quantify performance for MultiLoKo is the average Exact Match score across languages, which expresses how many of the answers match one of the gold standard answers verbatim (after post-processing the answers).
GapThe second main metric is the gap between a model's best and worst performing language. We gap to quantify the extent to which a model has achieved parity across languages. Because a small gap can be achieved both through parity on high scores as parity on low scores, it is most informative in combination with average benchmark performance.
Mother tongue effect (MTE)MTE expresses the impact of asking questions in a language in which the requested information is locally salient, compared to asking it in English. A positive MTE indicates information is more readily available in the language it was (likely) present in the training data, whereas a negative mother tongue effect indicates the information is more easily accessible in English.
Locality effect (LE)LE quantifies the effect of using locally sourced vs translated data. It is measured by computing the difference between scores for locally sourced data and translated English-sourced data. A positive LE implies that using translated English data underestimates performance on a language, a negative LE that using translated English data overestimates performance.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.303, + 0.825, + 0.334 + ], + "angle": 0, + "content": "Table 1: MultiLoKo metric cheatsheet. We use several metrics to quantify model performance using MultiLoKo. This table provides a cheatsheet for their meaning." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.37, + 0.828, + 0.496 + ], + "angle": 0, + "content": "LE The locality effect (LE) is a measure of how much performance on knowledge tasks is over- or underestimated through the use of using translated English data, as opposed to locally relevant data. We quantify the locality effect as the difference in EM for English translated data and locally sourced data. If for a language the English translated data has as a higher EM, the LE is positive, indicating that using English translated data likely overestimating a model's ability on providing knowledge for that language. If the LE is negative the English translated data may provide an underestimation of the score for that language. Note that because we often observe both positive and negative LEs for the 30 non-English languages in MultiLoKo, the average LE across languages may be small, even if the differences for individual languages may be large." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.517, + 0.373, + 0.536 + ], + "angle": 0, + "content": "4 Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.549, + 0.825, + 0.579 + ], + "angle": 0, + "content": "We test and showcase our benchmark by running experiments with 11 different models of varying sizes, that were all marketed to have multilingual abilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.59, + 0.267, + 0.605 + ], + "angle": 0, + "content": "4.1 Models" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.616, + 0.825, + 0.701 + ], + "angle": 0, + "content": "To test the extent to which MultiLoKo provides useful signal across training stages, we consider both base and chat models. 
The base models we include in our experiments are Llama 3.1 70B and 405B (Dubey et al., 2024), Mixtral 8x22B (team, 2024), and Qwen 2.5 72B (Qwen et al., 2025), the seven chat models are Gemini 2.0 Flash (Google DeepMind, 2024), GPT4-o (OpenAI et al., 2024), Claude 3.5 Sonnet (Anthropic, 2025), Llama 3.1 70B and 405B Chat, Mixtral 8x22B-it, and Qwen 2.5 72B instruct. As mentioned before, we run chat and base models with separate prompts." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.719, + 0.352, + 0.735 + ], + "angle": 0, + "content": "4.2 Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.825, + 0.789 + ], + "angle": 0, + "content": "We run all of our experiments with the generation temperature set to 0. To facilitate automatic evaluation, we include an instruction to answer questions curtly and precisely, producing only a number/name/location/etc. Full template information can be found in our github repository." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.825, + 0.823 + ], + "angle": 0, + "content": "Few-shot prompting For base models we use a 5-shot prompt. For chat models, we use a 0-shot prompt, as this is the most likely use mode by chat model users." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.829, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Post-processing Because base models are good at following the instructions, minimal postprocessing is needed: we only lowercase the output and strip punctuation. Chat models often deviate from the required format, especially in English, in various ways that we discuss in Appendix B. To evaluate such models beyond their instruction-following issues, we perform more complex post-processing, aiming to remove any words resembling \"answer\" from the LLM output, as well as several special cases for English and Japanese. We provide full details about post-processing in Appendix C." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.195, + 0.089, + 0.795, + 0.272 + ], + "angle": 0, + "content": "
ModelEMGapMother tongue effectLocality effect
Gemini 2.0 Flash34.39± 2.9034.806.12± 1.900.36± 3.40
Llama 3.1 405B34.31± 2.7039.206.37± 1.700.62± 2.70
GPT4-o33.97± 3.6048.803.08± 2.000.35± 2.90
Llama 3.1 405B Chat27.70± 3.2040.803.97± 2.20-1.11± 2.70
Llama 3.1 70B26.92± 2.6028.802.72± 1.70-0.30± 3.10
Claude 3.5 Sonnet26.89± 4.4047.6024.18± 4.200.81± 2.90
Llama 3.1 70B Chat21.65± 2.8042.400.49± 1.60-3.32± 3.30
Mixtral 8x22B21.64± 4.2043.60-2.18± 3.00-0.65± 2.60
Qwen2.5 72B19.66± 2.3028.402.45± 2.10-2.28± 2.70
Mixtral 8x22B-it10.10± 3.1039.20-5.41± 2.00-0.54± 1.70
Qwen2.5 72B instruct2.54± 0.708.00-1.52± 1.000.43± 0.70
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.276, + 0.825, + 0.318 + ], + "angle": 0, + "content": "Table 2: Aggregate results dev. We report average EM, gap, mother tongue effect and locality effect for all 11 models on the MultiLoKo dev split. For EM, MTE and LE, we also indicate a confidence interval equal to two times the standard error across languages. Models are sorted by average EM." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.351, + 0.268, + 0.367 + ], + "angle": 0, + "content": "5 Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.382, + 0.825, + 0.507 + ], + "angle": 0, + "content": "As MultiLoKo has several partitions, there are many different results that can be computed. On a high level, we consider four different types of results. First, in § 5.1, we report average model results across several categories, including the average performance and an indicator of parity across languages. Next, in § 5.2, we dive deeper into the knowledge transfer occurring from one language to another, within individual models. In § 5.3, instead, we focus on differences between individual languages. Lastly, in § 5.4, we look in more detail at the dataset itself through the lens of model results, considering in particular the effect of locally sourcing data as opposed to translating English sourced data (§ 5.4.1), differences between our dev and test split (§ 5.4.2) and the difference between using human and machine translated data (§ 5.4.3)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.523, + 0.499, + 0.539 + ], + "angle": 0, + "content": "5.1 Aggregate results: EM and language gap" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.825, + 0.619 + ], + "angle": 0, + "content": "In Table 2, we provide a summary of the average dev results. 
Specifically, for each model, we report average EM and the gap between the best and the worst language, along with average MTE and LE, which we will discuss in a later section.7 We report average MTE, EM and LE along with a confidence interval equal to two times the standard error across languages, roughly equalling previously used \\(95\\%\\) confidence intervals (Madaan et al., 2024; Dubey et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.633, + 0.406, + 0.648 + ], + "angle": 0, + "content": "5.1.1 Model performance (EM)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.827, + 0.809 + ], + "angle": 0, + "content": "In Figure 1a, we show a boxplot of the distribution of the EM scores across models, ordered by average EM. The best performing models are Gemini 2.0 Flash, Llama 3.1 405B, and GPT4-o, while Mixtras 8x22B and the Qwen2.5 72B populate the lower rankings on the list. Somewhat surprisingly, base models are generally outperforming chat models on the benchmark, this is partly due to false refusals and poor instruction following in the chat models. In some cases, however, the chat models simply just provide a qualitatively different answer than the base models. The figure shows that MultiLoKo is a relatively difficult benchmark across the board: the average EM of even the best performing model barely exceeds 30, while the bottom performing models have EM scores lower than 20. Also scores for the easiest languages (see also § 5.3) are capped below 50. Furthermore, for virtually all models performance varies starkly between languages, suggesting that none of the models we considered are evenly multilingual across the 31 languages in MultiLoKo." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.823, + 0.258, + 0.838 + ], + "angle": 0, + "content": "5.1.2 Gap" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.846, + 0.825, + 0.889 + ], + "angle": 0, + "content": "While average EM score provides some information about a model's multilingual abilities, the same EM score can hide many different patterns regarding individual language scores. As we appreciate it is not always practical to consider 31 separate EM scores in model development, we add a second" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.463, + 0.911 + ], + "angle": 0, + "content": "7A metric cheatsheet can be found in Table 1." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.09, + 0.526, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.287, + 0.393, + 0.3 + ], + "angle": 0, + "content": "(a) EM scores" + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.093, + 0.824, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.653, + 0.287, + 0.701, + 0.301 + ], + "angle": 0, + "content": "(b) Gap" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.339 + ], + "angle": 0, + "content": "Figure 1: EM distributions and Gap dev. (a) Boxplot of observed EM scores for each model, sorted by mean. (b) Difference between the best EM and the worst of the N next best EM scores, per model." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.363, + 0.825, + 0.394 + ], + "angle": 0, + "content": "summary metric to the main metrics of MultiLoKo: the gap between the best and worst performing languages, representative of the extent to which a model has achieved parity across languages." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.827, + 0.538 + ], + "angle": 0, + "content": "In Figure 1a, we already saw that the per-language scores have quite a range for all models. In Figure 1b, we study this in more detail, by considering the gap between the best language and the next N best language (30 corresponds to the full benchmark). On the right end of the plot, we see that already considering only 5 languages besides English, even the best performer has a gap of over five points - relatively large in absolute terms, very large in relative ones - between English and the worst of the remaining languages. For the next two best models, the top-5 gap even exceeds 10 points. As we include more languages, up to the full benchmark, the gap increases, with GPT4-o showing a gap of almost 50 points. The only models for which the gap is small are the models that have overall low performance and thus little space to drop from English, illustrating how gap and average EM provide complementary information about multilingual performance." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.553, + 0.436, + 0.568 + ], + "angle": 0, + "content": "5.2 Generalisation across languages" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.827, + 0.607 + ], + "angle": 0, + "content": "Next, we study whether knowledge generalises across languages or, in other words, whether knowledge transfers from one language to another." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.621, + 0.453, + 0.635 + ], + "angle": 0, + "content": "5.2.1 The mother tongue effect (MTE)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.827, + 0.826 + ], + "angle": 0, + "content": "First, we compare the EM of models when questions are asked in the language for which the questions were originally sourced with performance when the same questions are asked in English. 
We quantify this effect with the metric MTE, which expresses the difference in performance between these two settings (see § 3.3). In Figure 2a, we show MTE per language, averaged across models. For most languages, performance is higher when the question is asked in the language for which the question is locally relevant. The languages for which MTE is negative or close to 0 are virtually all languages that perform very poorly also in the mother tongue and for which there is therefore little room for further decrease. From one perspective, the improvements when questions are asked in the low-resource but native languages can be seen as surprising: as models perform much better in English than non-English languages, one may expect performances to go up as a consequence of that. On the other hand, similar 'mother tongue effects' have been observed in earlier studies. For example, Ohmer et al. (2024) found that models are comparatively better at answering factual questions about topics when they are asked in a language to which culture the fact pertains. It appears that also in our case," + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.834, + 0.825, + 0.884 + ], + "angle": 0, + "content": "With the used prompt, we could not get Claude 3.5 Sonnet to answer questions in English in an automatically parsable manner, leading to an abysmal score of 4.8 on the English sourced data and equally low scores on data translated into English. Examples of this issue can be found in Appendix B. Because this issue is not indicative of lack of knowledge or transfer, we excluded Claude 3.5 Sonnet from any of the transfer results in this section." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.912 + ], + "angle": 0, + "content": "The exception to this rule is Hindi, which has a reasonable performance in the native language but nevertheless improves in English. We further discuss such language-specific points in \\(\\S 5.3\\)." 
+ }, + { + "type": "list", + "bbox": [ + 0.171, + 0.834, + 0.825, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.093, + 0.592, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.29, + 0.273, + 0.482, + 0.286 + ], + "angle": 0, + "content": "(a) Average MTE across models" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.097, + 0.804, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.631, + 0.272, + 0.776, + 0.285 + ], + "angle": 0, + "content": "(b) KDE of MTE scores" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.293, + 0.825, + 0.351 + ], + "angle": 0, + "content": "Figure 2: Mother tongue effect dev. (a) Per language MTE for MultiLoKo dev, indicating the difference between questions asked in the mother tongue (locally relevant) and in English. Error bars indicate 2 times standard error across all models, excluding Claude 3.5 Sonnet. (b) KDE plot of the distribution of MTE scores for the top-3 performing models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.825, + 0.424 + ], + "angle": 0, + "content": "the effect of accessibility of information in a relevant language wins out over the generally stronger English performance, pointing to a gap in models' ability to generalise knowledge from one language to another." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.827, + 0.528 + ], + "angle": 0, + "content": "In Figure 2b, we further consider the distribution of MTE scores for the top-3 models. Interestingly, this distribution is quite different between models. Despite having comparable average scores, the top-3 performing models differ in their MTE distributions across languages. 
Of the three models, GPT4-o has the smallest average effect (3.2); Llama 3.1 405B has a much higher average effect (6.6), but less probability mass on the more extreme ranges of the spectrum (min max values of \\([-7, +12]\\) vs \\([-9, +13]\\)) Gemini 2.0 Flash is in the middle in terms of average (6.3), but shows the largest variation across languages \\([-10, +16]\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.534, + 0.825, + 0.59 + ], + "angle": 0, + "content": "Note, however, that without studying the actual training data of the various models, it is possible to infer that all these models have relatively poor transfer across languages, but not conclusively say that one model is better than another: it is also possible that the information sourced for languages with better MTEs was simply better represented in the English data of a respective model." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.61, + 0.427, + 0.625 + ], + "angle": 0, + "content": "5.2.2 Consistency across responses" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.636, + 0.825, + 0.734 + ], + "angle": 0, + "content": "Another way to study transfer between languages is to look at the consistency of responses across languages (previously also used by Qi et al., 2023; Ohmer et al., 2023, i.a.). After all, it is possible for a model that has an EM of 30 on both English and another language to be nevertheless completely misaligned on which questions they respond to correctly. Studying consistency across responses can therefore be seen as a more direct way of studying whether knowledge is equally accessible across languages. Furthermore, consistency can be studied independently from accuracy, as it is possible for a model to have very good transfer, but be simply consistently wrong." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.827, + 0.865 + ], + "angle": 0, + "content": "In the dataset used by Ohmer et al. 
(2023), the correct answers (consisting of names, numbers and years) are identical across the languages they consider, while Qi et al. (2023) use a factual knowledge task that requires ranking outputs. Neither of their metrics can thus be directly applied in our case. Specifically, measuring consistency on incorrect responses – an important component of the work of Ohmer et al. (2023) because it can provide positive rather than negative evidence – would require assessing whether two answers in different languages are to be considered semantically equivalent, which is not practically feasible for our data. Rather, we opt for a simpler consistency metric, which quantifies what percentage of the questions that are answered correctly in either language are answered correctly in both languages." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "In Figure 3a, we show the average consistency of all models (excluding again Claude Sonnet 3.5); for completeness, we also show the per-language consistency results in Figure 3b. The results confirm our earlier conclusion that much improvements can be made when it comes to knowledge transfer" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.177, + 0.094, + 0.391, + 0.228 + ], + "angle": 0, + "content": "
ModelConsistency
Gemini 2.0 Flash0.46± 0.04
Llama 3.1 405B0.46± 0.04
Llama 3.1 70B0.45± 0.03
GPT4-o0.45± 0.05
Llama 3.1 405B Chat0.42± 0.04
Qwen2.5 72B0.40± 0.04
Llama 3.1 70B Chat0.40± 0.04
Mixtral 8x22B0.36± 0.05
Mixtral 8x22B-it0.21± 0.05
Qwen2.5 72B instruct0.08± 0.03
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.185, + 0.269, + 0.384, + 0.284 + ], + "angle": 0, + "content": "(a) Consistency scores per model" + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.093, + 0.821, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.269, + 0.72, + 0.284 + ], + "angle": 0, + "content": "(b) Consistency scores per language" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.334 + ], + "angle": 0, + "content": "Figure 3: Consistency results dev. (a) Average per-model consistency scores, \\(\\pm 2\\) times the standard error across languages. (b) Boxplot of model consistency scores per language, indicating the relative overlap of correctly answered questions when asked in the mother tongue vs in English." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.36, + 0.825, + 0.391 + ], + "angle": 0, + "content": "between languages: even for the best performing models, there is an overlap of not even \\(50\\%\\) between the questions correctly answered across languages." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.406, + 0.427, + 0.421 + ], + "angle": 0, + "content": "5.3 Differences between languages" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.431, + 0.825, + 0.46 + ], + "angle": 0, + "content": "So far, with the exception of MTE and parity scores, we have primarily looked at results averaged across languages. Now, we consider language-specific results in a bit more detail." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.475, + 0.82, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.779, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Figure 4: Average EM per language dev, in mother tongue and English. Top: Average EM on locally sourced data. Bottom: Average EM on locally sourced data, translated to English." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.832, + 0.527, + 0.848 + ], + "angle": 0, + "content": "5.3.1 Language difficulty on locally sourced data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "First, in Figure 4 (top), we show average model results for all languages on all locally sourced data. In broad strokes, the order of difficulty is correlated with how low- or high- resource a language is to be considered: while languages such as French, English and Spanish occur at the easier end of the spectrum, we find Farsi, Khmer and Malay among the most difficult languages. There are a few" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.187, + 0.092, + 0.581, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.261, + 0.48, + 0.274 + ], + "angle": 0, + "content": "(a) Locality effect per language" + }, + { + "type": "table", + "bbox": [ + 0.604, + 0.096, + 0.798, + 0.219 + ], + "angle": 0, + "content": "
ModelRank correlation language difficulty
Gemini 2.0 Flash0.54
Llama 3.1 405B0.65
GPT4-o0.64
Llama 3.1 405B Chat0.70
Llama 3.1 70B0.60
Claude 3.5 Sonnet0.84
Llama 3.1 70B Chat0.68
Mixtral 8x22B0.86
Qwen2.5 72B0.45
Mixtral 8x22B-it0.88
Qwen2.5 72B instruct0.55
" + }, + { + "type": "table_caption", + "bbox": [ + 0.598, + 0.26, + 0.808, + 0.274 + ], + "angle": 0, + "content": "(b) Language difficulty correlations" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.281, + 0.825, + 0.351 + ], + "angle": 0, + "content": "Figure 5: Locality Effect dev. (a) Per language Locality Effect, indicating the difference in assigned scores between locally sourced and translated English data. A positive LE means the locally sourced data has a higher score (is easier), a negative LE the English sourced data has a higher score. (b) Per-model rank correlation between language difficulty of languages on locally sourced vs English translated data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.379, + 0.825, + 0.435 + ], + "angle": 0, + "content": "notable exceptions: on average the second highest scoring language in our benchmark is Tagalog. While it is difficult to judge why without doing a detailed analysis on the questions, we hypothesise that the questions asked by the Tagalog language experts are simply less complex than the questions of other languages." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.45, + 0.62, + 0.466 + ], + "angle": 0, + "content": "5.3.2 Separating language difficulty from language proficiency" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.474, + 0.827, + 0.627 + ], + "angle": 0, + "content": "In an attempt to distinguish data difficulty from language proficiency, we consider also the difficulty of the locally sourced data translated to English. While this conflates data difficulty and transfer (see § 5.2), it still gives us some indication of the extent to which low performance in languages is caused by poor language proficiency versus data difficulty. In the bottom half of Figure 4, we show the model performances as computed on the locally sourced data translated to English. The correlation between these two language difficulty rankings between these setups is 0.79. 
When comparing the ranks of the various languages, only a handful of languages shift more than a few places. Specifically, Bengali \\((26->4)\\), Urdu \\((26->12)\\), and Hindi \\((14->5)\\) all decrease substantially in difficulty rank. The fact that they are comparatively easier in English suggests that for those languages proficiency may be a larger problem than data difficulty. On the other hand, only Russian \\((7->21)\\) shows a drop of more than 5 places." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.643, + 0.295, + 0.657 + ], + "angle": 0, + "content": "5.4 The dataset" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.825, + 0.725 + ], + "angle": 0, + "content": "Lastly, we discuss two aspects related to the creation of the dataset. Specifically, we consider the impact of local sourcing vs translated English data, and we have a look at the dataset split across dev and test. We consider the difference between using human-authored as opposed to machine-authored translations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.74, + 0.563, + 0.755 + ], + "angle": 0, + "content": "5.4.1 Locally-sourced vs translated-from-English data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.825, + 0.793 + ], + "angle": 0, + "content": "To study the impact of using locally sourced data, we consider the difference between per-language EM on locally sourced data and translated from English data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.827, + 0.879 + ], + "angle": 0, + "content": "Language difficulty First, we look at per-language differences between locally sourced and translated English data. We quantify this difference in a metric we call the Locality Effect (LE). The size of the locality effect tells us how much the estimate of a model's strength in a particular language would have been off if we had chosen to use a translated benchmark rather than a locally sourced one. We plot this difference in Figure 5a." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "As we can see, the scores between locally and translated English-sourced data can differ quite drastically, almost 15 percentage points averaged across models. For individual models, the differences are" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "even larger. For Llama 3.1 405B, the locality effect ranges from -13 to +17; for Gemini 2.0 Flash from -21 to +15; and for GPT4-o from -22 to +14. The differences are not just in absolute scores; also the ordering of language by difficulty is quite different across the two data collection setups, as can be seen by the per-model rank correlations of language difficulty between the two conditions, shown in Figure 5b. Using English-translated rather than locally sourced data does thus not only provide different estimates, but may suggest different languages to focus on for improvement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.189, + 0.827, + 0.274 + ], + "angle": 0, + "content": "Model rankings Next, we consider the ranking of the models under the two different data regimes. Interestingly, given the transfer effect, changing from locally to English translated data does not make any difference in the ranking. Also in terms of absolute scores, the difference between the two data collection setups is relatively minor. At least for our type of data, it thus appears that using translated data as opposed to locally sourced data may be a reasonable setup for comparing models on average, though not for getting adequate per-language or set language prioritisation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.287, + 0.342, + 0.303 + ], + "angle": 0, + "content": "5.4.2 The dataset split" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.311, + 0.825, + 0.435 + ], + "angle": 0, + "content": "As mentioned in the dataset construction, we took the deliberate decision to generate a split based on topic frequency, rather than creating a random split. The aim of this out-of-distribution split is to test generalisation to topics that are more in the tail of the distribution, as well as encourage improvements in multilinguality beyond having a higher score on the specific released MultiLoKo dev set. Of course, however, because of our sourcing method, all the topics in MultiLoKo are topics on which information is available on Wikipedia. As training data, Wikipedia is often packaged as a single scrape; this may render our deliberate splitting efforts futile: the fact that a page is less visited does not make it less likely that the specific page is included in the training data. Now, we test if the dev and test split are in fact distributionally different." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.452, + 0.819, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.696, + 0.825, + 0.738 + ], + "angle": 0, + "content": "Figure 6: Average EM, dev versus test. We show the difference in score distributions between the MultiLoKo dev and test set. The results confirm that the test set is indeed out of distribution with respect to the dev set: dev scores (upper bars) are higher across the board." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.837 + ], + "angle": 0, + "content": "In Figure 6, we show boxplots of dev and test EM scores for all models under consideration. The plot confirms that the split is indeed to be considered an OOD split: for virtually all models, the test scores are lower than the dev scores. 
Across all models, the average dev score is 24, whereas the average test score is 21. This suggests that our test set does indeed contain more tail knowledge than the dev set, despite the aforementioned arguments regarding Wikipedia. Interestingly, this implies that Wikipedia may not be the primary source from which models learn this information." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.825, + 0.913 + ], + "angle": 0, + "content": "The difference in difficulty also has bearing on the other metrics: the parity score (thus: the gap between the best and worst performing language) is 37 for dev vs 34 for test, suggesting that more difficult data may to some extent hide differences between languages and therefore exemplifying the utility of considering parity along with overall performance. The mother tongue effect, on the other hand, is comparable across dev and test (1.61 vs 1.56, respectively). For the locality effect, the" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.133 + ], + "angle": 0, + "content": "effect is less interpretable. While the average difference is substantial (-0.6 dev vs -1.9 test), there is no clear pattern discernible across languages: for some, the effect reduces, whereas for others it increases." + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.158, + 0.406, + 0.26 + ], + "angle": 0, + "content": "
ModelRmin Δmax Δavg Δ
Gemini 2.0 Flash0.80-10.0021.604.35
Llama 3.1 405B0.83-4.4018.805.82
GPT4-o0.85-6.0021.604.46
Llama 3.1 405B Chat0.80-10.4022.403.08
Llama 3.1 70B0.77-7.6022.004.59
Claude 3.5 Sonnet0.90-9.6020.802.84
Llama 3.1 70B Chat0.87-6.0020.003.12
Mixtral 8x22B0.91-3.2020.004.13
Qwen2.5 72B0.83-4.0016.803.47
Mixtral 8x22B-it0.92-4.8012.402.41
Qwen2.5 72B instruct0.80-0.803.200.36
" + }, + { + "type": "image", + "bbox": [ + 0.43, + 0.149, + 0.797, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.524, + 0.314, + 0.702, + 0.326 + ], + "angle": 0, + "content": "(b) MT vs human translations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.301, + 0.413, + 0.327 + ], + "angle": 0, + "content": "(a) Language difficulty stats across human- and machine translations" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.335, + 0.825, + 0.392 + ], + "angle": 0, + "content": "Figure 7: Machine versus human translations dev. (a) Per-model rank correlation between language difficulty between MT and human translations, and min, max and average difference between the two conditions. (b) Difference between EM computed on human- and machine-translated data (human score - machine score), per language." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.417, + 0.471, + 0.43 + ], + "angle": 0, + "content": "5.4.3 Human versus machine translation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.44, + 0.825, + 0.496 + ], + "angle": 0, + "content": "Lastly, we consider the impact of using machine- or human-authored translations. To do so, we look at the differences in EM scores between machine and human translated data for the various languages, taking the human translations as the 'gold standard' (i.e. we consider human translated EM - machine translated EM). We show the results in Figure 7." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.826, + 0.613 + ], + "angle": 0, + "content": "In Figure 7a we show the rank correlations of the difficulties of the various languages per model, as well as the min, max and average drop from human to machine translations. 
We see that, at the model level, using machine translations rather than human translations results in a systematic undervaluation of the model scores: there is not a single model for which the 'drop' from human to machine translations is negative on average. In part, this may be a result of the previously observed lack of knowledge transfer effect. That the drop is not substantially lower for models with better transfer, however, suggests that the more impactful factor is the quality of the machine translations, which may at times result in unanswerable questions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.62, + 0.825, + 0.689 + ], + "angle": 0, + "content": "In terms of model rankings, the difference between machine and human translations is minor: the model rankings between the two conditions have a rank correlation of 0.97 on the dev split, with only three local swaps (2&3 and 5&6 and 8&9) of models that did not have statistically different scores to begin with. This suggests that to compare models, using machine translation can be an acceptable alternative to human translations, as the mis-estimation is systematic across models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.695, + 0.826, + 0.779 + ], + "angle": 0, + "content": "Considering the effect across languages, we observe that even though the average drop is positive, for virtually all models there are at least some languages for which performance increases when MT is used, in some cases with even more than 10 points. For a handful of languages - specifically Russian, Swedish and Urdu - this is also true across models (see Figure 7b). While the overall rank correlation is high for language difficulty (0.88), it thus still urges caution in using machine translated data for language improvement prioritisation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.798, + 0.301, + 0.814 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Notwithstanding the increasing multinational deployment of LLMs in many parts of the world, adequately evaluating their multilinguality remains a challenging enterprise. Only in part is this due to the scarcity of high-quality and broad-coverage multilingual benchmarks for LLM: perhaps a more pressing issue is that the benchmarks that are frequently used for multilingual evaluation virtually all consist of translated English data. While using completely parallel data has its advantages, using translated English data imposes an English-centric bias on the content of the benchmarks," + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.218 + ], + "angle": 0, + "content": "implying that even if the benchmark evaluates multilinguality on the surface, it does not in content. In our work, we aim to address this by presenting MultiLoKo, a multilingual benchmark spanning 31 languages that combines the best of both worlds. MultiLoKo contains 500 questions targeting locally relevant knowledge for 31 languages, separately sourced for each language with a protocol specifically designed to ensure local relevance of the question topics. It is also fully parallel, because it contains human-authored translations of the non-English partitions into English and vice versa. As such, it allows to study various questions related to multilinguality, transfer and multilingual benchmark creation. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while the test set of the benchmarks remains private, at least for the near future." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.223, + 0.828, + 0.336 + ], + "angle": 0, + "content": "We use MultiLoKo to analyse 4 base and 7 chat models marketed to be multilingual. We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language, followed by Llama 3.1 405B and GPT4-o, which are close contenders in terms of average performance but both have substantially higher language gaps (39 and 49 points). Generally, scores are better when questions are asked in the language to which they are relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low per-sample consistency across question language." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.828, + 0.506 + ], + "angle": 0, + "content": "On a meta-level, we study the relevance of using locally sourced data as opposed to translated English data as well as whether it matters if translations are authored by humans or machines. We find that the estimated difficulty of some languages changes drastically across the two sourcing setups, within the range of 15 points decrease and 8 points increase on average across models. The rank correlation between average language difficulty score is 0.78. Furthermore, individual model scores between locally and English-translated data can differ up to 22 points for some languages. However, changing the sourcing setup does not impact model rankings, suggesting that using translated data may be suitable for comparing models but less for model development or language prioritisation. For using machine- instead of human-authored translations, as well, the effect on model ranking is limited \\((\\mathrm{R} = 0.97)\\), but the difficulty estimates of various languages changes with up to 12 points. 
Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to \\(34\\%\\) of the human-translated scores." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.512, + 0.828, + 0.625 + ], + "angle": 0, + "content": "While our results section is extensive already, there are still several parts of MultiLoKo that we did not explore. For instance, because of the sourcing strategy, each native question is coupled with a paragraph that contains the answer to the question. MultiLoKo could thus be transformed into a reading-comprehension benchmark, and we consider studying the difference between the knowledge and reading comprehension setup an interesting direction for future work. Furthermore, each question contains an elaborate long answer intended to explain the short answer. We have not used the long answers in any of our experiments, but foresee interesting directions including studies into CoT prompting or studying answer rationales." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.647, + 0.307, + 0.663 + ], + "angle": 0, + "content": "7 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.68, + 0.585, + 0.695 + ], + "angle": 0, + "content": "In this last section, we discuss various limitations of our work." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.713, + 0.828, + 0.783 + ], + "angle": 0, + "content": "Local relevance In our sourcing protocol, we explicitly sought to create questions locally relevant to the respective languages. It is important to notice, however, that some languages, such as English, Spanish, Portuguese, Chinese, French and to a lesser extent German and Dutch cover a wide variety of cultures. We did not separately control for that and the data for those languages thus likely comprises a mix of different locales." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Data quality Building a bias-free evaluation datasets with few mistakes is not an easy feat. Even though we implemented several rounds of quality checks in our data collection pipeline, when looking at outputs we still incidentally found mistakes in the data or answers. We fixed some of these mistakes as we encountered them, but it is quite likely that more such mistakes occur in the dataset. It is also important to point out that we are less likely to spot such issues for languages that we do not understand at all, potentially creating a bias towards the set of languages for which we have a rudimentary understanding. Overall, however, we believe that the pipeline we designed assures a dataset of high quality. Of course, we welcome reports of mistakes spotted by others in the data." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.175, + 0.091, + 0.825, + 0.298 + ], + "angle": 0, + "content": "Evaluation Because MultiLoKo is a generative benchmark, computing scores requires comparisons of a generated answer with a set of gold answers. A first obstacle to this method of evaluation is that it is hard to create an exhaustive list of correct short-form answers. This is especially true when the correct answer is not a number, date, title or something else that can be expressed only in a few ways. In addition to that, it is hard to incentivise LLMs to produce concise answers. Even when instructed to answer with only a number / date / name / title, they may respond with a full sentence, add a reasoning trail to their answer, or add words beyond the minimal answer in a different fashion. 
We addressed such issues that were systematic in post-processing (see Appendix B), but it is hard to a priori catch allthe ways that LLMs may deviate from the requested protocols. In some cases, we found additional post-processing steps that increased the scores of some models only later in the process, because scores for particular languages looked suspiciously low. For instance, we had not initially realised that our punctuation stripper did not strip punctuation in Urdu, which specifically influenced GPT4-o and Gemini. We considered several other metrics as well as judges, but eventually found that EM provided the clearest and least biased signal. It remains, however, a challenge to evaluate chatty LLMs completely independently from their ability to follow instructions." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.322, + 0.825, + 0.487 + ], + "angle": 0, + "content": "Wikipedia as information source MultiLoKo, as several other both multilingual as well as monolingual benchmarks, uses Wikipedia as main source of information. This has the advantage that Wikipedia has a large coverage across many different languages and the information is considered to be of high quality. It also facilitates comparable sourcing across languages. Of course, it also poses limitations. For one, it still provides a bias to the specific topics that can be included, that are usually primarily knowledge based. In fact, MultiLoKo is indeed a knowledge benchmark; it does not consider other types of skills. Secondly, and perhaps more importantly, Wikipedia is a corpus frequently used in the training data of LLMs. The fact that MultiLoKo is a challenging benchmark even given that (multilingual) wikipedia is likely included in the training data of most of the LLMs evaluated suggests that this is not a large issue at the moment. However, it is very possible that MultiLoKo can be 'hacked' relatively easily simply by strongly oversampling multilingual wikipedia data." 
+ }, + { + "type": "title", + "bbox": [ + 0.176, + 0.517, + 0.338, + 0.535 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.552, + 0.825, + 0.663 + ], + "angle": 0, + "content": "While this paper knows only two authors, this benchmark would not have been possible without the support and contributions of many people. We wish to thank all of them in this last section. First, we thank Van Phung, Kriz Chan, Antonio Gai, Dunant Hin and Emily Du for their support on facilitating and streamlining interactions with vendors for the data collection process, and Milena Hoffman for her indispensable administrative support in managing the data collection process. We would furthermore like to thank Van Phung and Kriz Chan for their continued help on ensuring data quality, saliency checking output, brainstorming and general support throughout the creation of the benchmark." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.669, + 0.825, + 0.85 + ], + "angle": 0, + "content": "We also thank the linguists that helped us for their contributions to the analysis of the pilot questions in the benchmark, which played an important role in finetuning and improving our annotation protocol as well as disregard inappropriate questions, and for helping us design prompt templates to allow language-specific querying of models in different stages for each of the languages in MultiLoKo. Specifically, we would like to thank Abdul Haque (Urdu), Aleksandra Antokhina (Russian), Ananya Banerjee (Bengali), Firman Tahar (Indonesian), Florian Mouret (French), Francisco Paredes Maldonado (Spanish), Eriko Nakamura (Japanese), Julie Lee (Korean), Khanh Tien (Vietnamese), Miao Yeh (Traditional Chinese), Renata Barboza (Portuguese), Rishabh Goel (Hindi), Sanket Suhas Satope (Marathi), Sara Martellini (Italian) and Silvia Aponte (German). 
We thank Kriz Chan by streamlining our collaboration with these linguists, and Maria Paez Playa for offering her teams time on this enterprise. We furthermore thank Sabrina Qiao for providing resources for quick-turnaround QA support, and Ateeq Awan (English), Kaila Conley-Coversi (Italian), Semanti Roy (Bengali) and Shahmir Shaikh (English) for delivering this QA support." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.855, + 0.823, + 0.911 + ], + "angle": 0, + "content": "As doing manual saliency checks is challenging for a multilingual benchmark, we also relied on the help of several colleagues to debug small issues, detect errors in questions and prompts and double check annotation judgements. We would like to thank Anna Prochowska, Daria Dudurca, Diego Perino, Etai Sella, Ivan John Piramide, Lovish Madaan, Yanir Kleiman for their help on this." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.937, + 0.508, + 0.947 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.115, + 0.829, + 0.228 + ], + "angle": 0, + "content": "Fakhraddin Alwajih, Gagan Bhatia, and Muhammad Abdul-Mageed. Dallah: A dialect-aware multimodal large language model for Arabic. In Nizar Habash, Houda Bouamor, Ramy Eskander, Nadi Tomeh, Ibrahim Abu Farha, Ahmed Abdelali, Samia Touileb, Injy Hamed, Yaser Onaizan, Bashar Alhafni, Wissam Antoun, Salam Khalifa, Hatem Haddad, Imed Zitouni, Badr AlKhamissi, Rawan Almatham, and Khalil Mrini, editors, Proceedings of The Second Arabic Natural Language Processing Conference, pages 320-336, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.arabicnlp-1.27. URL https://aclanthology.org/2024.arabicnlp-1.27/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.239, + 0.829, + 0.267 + ], + "angle": 0, + "content": "Anthropic. 
Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet, 2025. Accessed: 2025-04-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.279, + 0.829, + 0.351 + ], + "angle": 0, + "content": "Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. On the cross-lingual transferability of monolingual representations. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4623–4637, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.421. URL https://aclanthology.org/2020.acl-main.421/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.362, + 0.829, + 0.461 + ], + "angle": 0, + "content": "Lucas Bandarkar, Davis Liang, Benjamin Muller, Mikel Artetxe, Satya Narayan Shukla, Donald Husa, Naman Goyal, Abhinandan Krishnan, Luke Zettlemoyer, and Madian Khabsa. The belebele benchmark: a parallel reading comprehension dataset in 122 language variants. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 749-775, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.44. URL https://aclanthology.org/2024.acl-long.44/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.472, + 0.829, + 0.624 + ], + "angle": 0, + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 
Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/1457c0d6bcbd4967418bf8ac142f64a-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.637, + 0.829, + 0.68 + ], + "angle": 0, + "content": "Pinzhen Chen, Simon Yu, Zhicheng Guo, and Barry Haddow. Is it good data for multilingual instruction tuning or just bad multilingual evaluation for large language models? CoRR, abs/2406.12822, 2024. URL https://doi.org/10.48550/arXiv.2406.12822." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.691, + 0.829, + 0.735 + ], + "angle": 0, + "content": "Zhihong Chen, Shuo Yan, Juhao Liang, Feng Jiang, Xiangbo Wu, Fei Yu, Guiming Hardy Chen, Junying Chen, Hongbo Zhang, Li Jianquan, et al. Multilingualsift: Multilingual supervised instruction fine-tuning, 2023. URL https://arxiv.org/pdf/2412.15115." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.746, + 0.829, + 0.815 + ], + "angle": 0, + "content": "Jonathan H. Clark, Eunsol Choi, Michael Collins, Dan Garrette, Tom Kwiatkowski, Vitaly Nikolaev, and Jennimaria Palomaki. TyDi QA: A benchmark for information-seeking question answering in typologically diverse languages. Transactions of the Association for Computational Linguistics, 8: 454-470, 2020. doi: 10.1162/tacl_a_00317. URL https://aclanthology.org/2020.tacl-1. 30/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.828, + 0.829, + 0.913 + ], + "angle": 0, + "content": "Alexis Conneau, Rudy Rinott, Guillaume Lample, Adina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. XNLI: Evaluating cross-lingual sentence representations. 
In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2475–2485, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1269. URL https://aclanthology.org/D18-1269/." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.115, + 0.829, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.091, + 0.827, + 0.341 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Allonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. 
Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. The llama 3 herd of models. CoRR, abs/2407.21783, 2024. doi: 10.48550/ARXIV.2407.21783. URL https://doi.org/10.48550/arXiv.2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.35, + 0.827, + 0.476 + ], + "angle": 0, + "content": "Alena Fenogenova, Artem Chervyakov, Nikita Martynov, Anastasia Kozlova, Maria Tikhonova, Albina Akhmetgareeva, Anton Emelyanov, Denis Shevelev, Pavel Lebedev, Leonid Sinev, Ulyana Isaeva, Katerina Kolomeytseva, Daniil Moskovskiy, Elizaveta Goncharova, Nikita Savushkin, Polina Mikhailova, Anastasia Minaeva, Denis Dimitrov, Alexander Panchenko, and Sergey Markov. MERA: A comprehensive LLM evaluation in Russian. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9920–9948, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.534. URL https://aclanthology.org/2024.acl-long.534/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.485, + 0.827, + 0.527 + ], + "angle": 0, + "content": "Google DeepMind. Google gemini ai update - December 2024. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024. Accessed: 2025-04-11." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.537, + 0.826, + 0.607 + ], + "angle": 0, + "content": "Naman Goyal, Cynthia Gao, Vishrav Chaudhary, Peng-Jen Chen, Guillaume Wenzek, Da Ju, Sanjana Krishnan, Marc'Aurelio Ranzato, Francisco Guzmán, and Angela Fan. The Flores-101 evaluation benchmark for low-resource and multilingual machine translation. Transactions of the Association for Computational Linguistics, 10:522-538, 2022. doi: 10.1162/tacl_a_00474. URL https://aclanthology.org/2022.tacl-1.30/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.616, + 0.827, + 0.713 + ], + "angle": 0, + "content": "Momchil Hardalov, Todor Mihaylov, Dimitrina Zlatkova, Yoan Dinkov, Ivan Koychev, and Preslav Nakov. EXAMS: A multi-subject high school examinations dataset for cross-lingual and multilingual question answering. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5427-5444, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.438. URL https://aclanthology.org/2020.emnlp-main.438/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.723, + 0.827, + 0.807 + ], + "angle": 0, + "content": "Yun He, Di Jin, Chaoqi Wang, Chloe Bi, Karishma Mandyam, Hejia Zhang, Chen Zhu, Ning Li, Tengyu Xu, Hongjiang Lv, Shruti Bhosale, Chenguang Zhu, Karthik Abinav Sankararaman, Eryk Helenowski, Melanie Kambadur, Aditya Tayade, Hao Ma, Han Fang, and Sinong Wang. Multi-if: Benchmarking llms on multi-turn and multilingual instructions following. CoRR, abs/2410.15553, 2024. doi: 10.48550/ARXIV.2410.15553. URL https://doi.org/10.48550/arXiv.2410.15553." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.817, + 0.827, + 0.86 + ], + "angle": 0, + "content": "Dieuwke Hupkes, Mario Giulianielli, Verna Dankers, Mikel Artetxe, Yanai Elazar, Tiago Pimentel, Christos Christodoulopoulos, Karim Lasri, Naomi Saphra, Arabella Sinclair, et al. A taxonomy and review of generalization research in nlp. Nature Machine Intelligence, 5(10):1161-1174, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.87, + 0.827, + 0.912 + ], + "angle": 0, + "content": "Zhengbao Jiang, Antonios Anastasopoulos, Jun Araki, Haibo Ding, and Graham Neubig. X-FACTR: Multilingual factual knowledge retrieval from pretrained language models. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Methods in Natural Language Processing (EMNLP), pages 5943-5959, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.479. URL https://aclanthology.org/2020.emnlp-main.479/." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.144, + 0.826, + 0.187 + ], + "angle": 0, + "content": "Jaap Jumelet, Leonie Weissweiler, and Arianna Bisazza. Multiblimp 1.0: A massively multilingual benchmark of linguistic minimal pairs. CoRR, abs/2504.02768, 2025. URL https://doi.org/10.48550/arXiv.2504.02768." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.197, + 0.827, + 0.28 + ], + "angle": 0, + "content": "Nora Kassner, Philipp Duffer, and Hinrich Schütze. Multilingual LAMA: Investigating knowledge in multilingual pretrained language models. 
In Paola Merlo, Jorg Tiedemann, and Reut Tsarfaty, editors, Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 3250-3258, Online, April 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.eacl-main.284. URL https://aclanthology.org/2021.eacl-main.284/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.291, + 0.827, + 0.375 + ], + "angle": 0, + "content": "Fajri Koto, Nurul Aisyah, Haonan Li, and Timothy Baldwin. Large language models only pass primary school exams in Indonesia: A comprehensive test on IndoMMLU. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12359-12374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.760. URL https://aclanthology.org/2023.emnlp-main.760/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.385, + 0.827, + 0.456 + ], + "angle": 0, + "content": "Patrick Lewis, Barlas Oguz, Rudy Rinnott, Sebastian Riedel, and Holger Schwenk. MLQA: Evaluating cross-lingual extractive question answering. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7315–7330, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.653. URL https://aclanthology.org/2020.acl-main.653/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.466, + 0.827, + 0.55 + ], + "angle": 0, + "content": "Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. CMMLU: Measuring massive multitask language understanding in Chinese. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 11260–11285, Bangkok, Thailand, August 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.671. URL https://aclanthology.org/2024.findings-acl.671/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.56, + 0.827, + 0.672 + ], + "angle": 0, + "content": "Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, and Xian Li. Few-shot learning with multilingual generative language models. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 9019-9052, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.616. URL https://aclanthology.org/2022.emnlp-main.616/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.681, + 0.827, + 0.724 + ], + "angle": 0, + "content": "Lovish Madaan, Aaditya K Singh, Ryan Schaeffer, Andrew Poulton, Sanmi Koyejo, Pontus Stenetorp, Sharan Narang, and Dieuwke Hupkes. Quantifying variance in evaluation benchmarks. arXiv preprint arXiv:2406.10229, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.734, + 0.827, + 0.79 + ], + "angle": 0, + "content": "Moran Mizrahi, Guy Kaplan, Dan Malkin, Rotem Dror, Dafna Shahaf, and Gabriel Stanovsky. State of what art? a call for multi-prompt LLM evaluation. Transactions of the Association for Computational Linguistics, 12:933-949, 2024. doi: 10.1162/tacl_a_00681. URL https://aclanthology.org/2024.tacl-1.52/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.8, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15991-16111, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.891. URL https://aclanthology.org/2023.acl-long.891/." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.175 + ], + "angle": 0, + "content": "Xenia Ohmer, Elia Bruni, and Dieuwke Hupkes. Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses. In Sebastian Gehrmann, Alex Wang, João Sedoc, Elizabeth Clark, Kaustubh Dhole, Khyathi Raghavi Chandu, Enrico Santus, and Hoorman Sedghamiz, editors, Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM), pages 258-276, Singapore, December 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.gem-1.22/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.186, + 0.826, + 0.241 + ], + "angle": 0, + "content": "Xenia Ohmer, Elia Bruni, and Dieuwke Hupke. From form(s) to meaning: Probing the semantic depths of language models using multisense consistency. 
Computational Linguistics, 50(4):1507-1556, 12 2024. ISSN 0891-2017. doi: 10.1162/coli_a_00529. URL https://doi.org/10.1162/coli_a_00529." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.252, + 0.825, + 0.279 + ], + "angle": 0, + "content": "OpenAI. Mmmlu dataset. https://huggingface.co/datasets/openai/MMMLU, 2025. Accessed: 2025-04-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.292, + 0.826, + 0.912 + ], + "angle": 0, + "content": "OpenAI,., Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Madry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alex Kirillov, Alex Nichol, Alex Paine, Alex Renzin, Alex Tachard Passos, Alexander Kirillov, Alex Christakis, Alexis Conneau, Ali Kamali, Allan Jabri, Allison Moyer, Allison Tam, Amadou Crookes, Amin Tootoochian, Amin Tootoonchian, Ananya Kumar, Andrea Vallone, Andrej Karpathy, Andrew Braunstein, Andrew Cann, Andrew Codispoti, Andrew Galu, Andrew Kondrich, Andrew Tulloch, Andrey Mishchenko, Angela Baek, Angela Jiang, Antoine Pelisse, Antonia Woodford, Anuj Gosalia, Arka Dhar, Ashley Pantuliano, Avi Nayak, Avital Oliver, Barret Zoph, Behrooz Ghorbani, Ben Leimberger, Ben Rossen, Ben Sokolowsky, Ben Wang, Benjamin Zweig, Beth Hoover, Blake Samic, Bob McGrew, Bobby Spero, Bogo Giertler, Bowen Cheng, Brad Lightcap, Brandon Walkin, Brendan Quinn, Brian Guarraci, Brian Hsu, Bright Kellogg, Brydon Eastman, Camillo Lugaresi, Carroll Wainwright, Cary Bassin, Cary Hudson, Casey Chu, Chad Nelson, Chak Li, Chan Jun Shern, Channing Conger, Charlotte Barette, Chelsea Voss, Chen Ding, Cheng Lu, Chong Zhang, Chris Beaumont, Chris Hallacy, Chris Koch, Christian Gibson, Christina Kim, Christine Choi, Christine McLeavey, Christopher Hesse, Claudia Fischer, Clemens Winter, Coley Czarnecki, Colin Jarvis, Colin Wei, Constantin Koumouzelis, Dane Sherburn, Daniel Kappler, Daniel Levin, Daniel Levy, 
David Carr, David Farhi, David Mely, David Robinson, David Sasaki, Kenny Jin, Dev Valladares, Dimitris Tsipras, Doug Li, Duc Phong Nguyen, Duncan Findlay Edede Oiwoh, Edmund Wong Ehsan Asdar Elizabeth Proehl Elizabeth Yang Eric Antonow Eric Kramer Eric Peterson Eric Sigler Eric Wallace Eugene Brevdo Evan Mays Farzad Khorasani Felipe Petroski Such Filippo Raso Francis Zhang Fred von Lohmann Freddie Sult Gabriel Goh Gene Oden Geoff Salmon Giulio Starace Greg Brockman Hadi Salman Haiming Bao Haitang Hu Hannah Wong Haoyu Wang Heather Schmidt Heather Whitney Heewoo Jun Hendrik Kirchner Henrique Ponde de Oliveira Pinto Hongyu Ren Huiwen Chang Hyung Won Chung Ian Kivlichan Ian O'Connell Ian O'Connell Ian Osband Ian Silber Ian Sohl Ibrahim Okuyucu Ikai Lan Ilya Kostrikov Ilya Sutskever Ingmar Kanitscheider Ishaan Gulrajani Jacob Coxon Jacob Menick Jakub Pachocki James Aung James Betker James Crooks James Lennon Jamie Kiros Jan Leike Jane Park Jason Kwon Jason Phang Jason Teplitz Jason Wei Jason Wolfe Jay Chen Jeff Harris Jenia Varavva Jessica Gan Lee Jessica Shieh Ji Lin Jiahui Yu Jiayi Weng Jie Tang Jieqi Yu Joanne Jang Joaquin Quinonero Candela Joe Beutler Joe Landers Joel Parish Johannes Heidecke John Schulman Jonathan Lachman Jonathan McKay Jonathan Uesato Jonathan Ward Jong Wook Kim Joost Huizinga Jordan Sitkin Jos Kraaijeveld Josh Gross Josh Kaplan Josh Snyder Joshua Achiam Joy Jiao Joyce Lee Juntang Zhuang Justyn Harriman Kai Fricke Kai Hayashi Karan Singhal Katy Shi Kevin Karthik Kayla Wood Kendra Rimbach Kenny Hsu Kenny Nguyen Keren Gu-Lemberg Kevin Button Kevin Liu Kiel Howe Krithika Muthukumar Kyle Luther Lama Ahmad Larry Kai Lauren Itow Lauren Workman Leher Pathak Leo Chen Li Jing Lia Guy Liam Fedus Liang Zhou Lien Mamitsuka Lilian Weng Lindsay McCallum Lindsey Held Long Ouyang Louis Feuvrier Lu Zhang Lukas Kondraciuk Lukasz Kaiser Luke Hewitt Luke Metz Lyric Doshi Mada Aflak Maddie Simens Madelaine Boyd Madeleine Thompson Marat Dukhan Mark Chen Mark Gray Mark 
Hudnall Marvin Zhang Marwan Aljubeh Mateusz Litwin Matthew Zeng Max Johnson Maya Shetty Mayank Gupta Meghan Shah Mehmet Yatbaz Meng Jia Yang Mengchao Zhong Mia Glaese Mianna Chen Michael Janner Michael Lampe Michael Petrov Michael Wu Michele Wang Michelle Fradin Michelle Pokrass Miguel Castro Miguel Oom Temudo de Castro Mikhail Pavlov Miles" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.189, + 0.091, + 0.827, + 0.409 + ], + "angle": 0, + "content": "Brundage, Miles Wang, Minal Khan, Mira Murati, Mo Bavarian, Molly Lin, Murat Yesildal, Nacho Soto, Natalia Gimelshein, Natalie Cone, Natalie Staudacher, Natalie Summers, Natan LaFontaine, Neil Chowdhury, Nick Ryder, Nick Stathas, Nick Turley, Nik Tezak, Nik Felix, Nithanth Kudige, Nitish Keskar, Noah Deutsch, Noel Bundick, Nora Puckett, Ofir Nachum, Ola Okelola, Oleg Boiko, Oleg Murk, Oliver Jaffe, Olivia Watkins, Olivier Godement, Owen Campbell-Moore, Patrick Chao, Paul McMillan, Pavel Belov, Peng Su, Peter Bak, Peter Bakkum, Peter Deng, Peter Dolan, Peter Hoeschele, Peter Welinder, Phil Tillet, Philip Pronin, Philippe Tillet, Prafulla Dhariwal, Qiming Yuan, Rachel Dias, Rachel Lim, Rahul Arora, Rajan Troll, Randall Lin, Rapha Gontijo Lopes, Raul Puri, Reah Miyara, Reimar Leike, Renaud Gaubert, Reza Zamani, Ricky Wang, Rob Donnelly, Rob Honsby, Rocky Smith, Rohan Sahai, Rohit Ramchandani, Romain Huet, Rory Carmichael, Rowan Zellers, Roy Chen, Ruby Chen, Ruslan Nigmatullin, Ryan Cheu, Saachi Jain, Sam Altman, Sam Schoenholz, Sam Toizer, Samuel Miserendino, Sandhini Agarwal, Sara Culver, Scott Ethersmith, Scott Gray, Sean Grove, Sean Metzger, Shamez Hermani, Shantanu Jain, Shengjia Zhao, Sherwin Wu, Shino Jomoto, Shirong Wu, Shuaiqi, Xia, Sonia Phene, Spencer Papay, Srinivas Narayanan, Steve 
Coffey, Steve Lee, Stewart Hall, Suchir Balaji Tal Broda Tal Stramer, Tao Xu, Tarun Gogineni, Taya Christianson, Ted Sanders, Tejal Patwardhan, Thomas Cunninghamman, Thomas Degry, Thomas Dimson, Thomas Raoux, Thomas Shadwell, Tianhao Zheng Todd Underwood,Todor Markov,Toki Sherbakov,Tom Rubin Tom Stasi Tomer Kaftan. Tristan Heywood,Troy Peterson,Tyce Walters,Tyna Eloundou,V Valerie Qi,Veit Moeller,Vinnie Monaco,Vishal Kuo,Vlad Fomenko,Wayne ChangWeiyi ZhengWenda ZhouWesam Manassra Will Sheu Wojciech Zaremba,Yash Patil Yilei Qian Yongjik Kim Youlong ChengYu Zhang. Yuchen He,Yuchen Zhang,Yujia Jin,Yunxing Dai,and Yury Malkov.Gpt-4o system card2024. URL https://arxiv.org/abs/2410.21276." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.416, + 0.827, + 0.486 + ], + "angle": 0, + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Pierre Isabelle, Eugene Charniak, and Dekang Lin, editors, Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA, July 2002. Association for Computational Linguistics. doi: 10.3115/1073083.1073135. URL https://aclanthology.org/P02-1040/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.493, + 0.827, + 0.577 + ], + "angle": 0, + "content": "Edoardo Maria Ponti, Goran Glavaš, Olga Majewska, Qianchu Liu, Ivan Vulić, and Anna Korhonen. XCOPA: A multilingual dataset for causal commonsense reasoning. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2362-2376, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.185. URL https://aclanthology.org/2020.emnlp-main.185/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.584, + 0.827, + 0.655 + ], + "angle": 0, + "content": "Maja Popovic. 
chrF: character n-gram F-score for automatic MT evaluation. In Ondrej Bojar, Rajan Chatterjee, Christian Federmann, Barry Haddow, Chris Hokamp, Matthias Huck, Varvara Logacheva, and Pavel Pecina, editors, Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395, Lisbon, Portugal, September 2015. Association for Computational Linguistics. doi: 10.18653/v1/W15-3049. URL https://aclanthology.org/W15-3049/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.661, + 0.827, + 0.731 + ], + "angle": 0, + "content": "Jirui Qi, Raquel Fernández, and Arianna Bisazza. Cross-lingual consistency of factual knowledge in multilingual language models. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10650-10666, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.658. URL https://aclanthology.org/2023.emnlp-main.658/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.738, + 0.827, + 0.835 + ], + "angle": 0, + "content": "Qwen,.; An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.843, + 0.827, + 0.912 + ], + "angle": 0, + "content": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. 
In Jian Su, Kevin Duh, and Xavier Carreras, editors, Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392, Austin, Texas, November 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264/." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.091, + 0.827, + 0.271 + ], + "angle": 0, + "content": "Angelika Romanou, Negar Foroutan, Anna Sotnikova, Zeming Chen, Sree Harsha Nelaturu, Shivalika Singh, Rishabh Maheshwary, Micol Altomare, Mohamed A. Haggag, Snegha A, Alfonso Amayuelas, Azril Hafizi Amirudin, Viraat Aryabumi, Danylo Boiko, Michael Chang, Jenny Chim, Gal Cohen, Aditya Kumar Dalmia, Abraham Diress, Sharad Duwal, Daniil Dzenhaliou, Daniel Fernando Erazo Florez, Fabian Farestam, Joseph Marvin Imperial, Shayekh Bin Islam, Perttu Isotalo, Maral Jabbarishiviari, Borje F. Karlsson, Eldar Khalilov, Christopher Klamm, Fajri Koto, Dominik Krzeminski, Gabriel Adriano de Melo, Syrielle Montariol, Yiyang Nan, Joel Niklaus, Jekaterina Novikova, Johan Samir Obando Ceron, Debjit Paul, Esther Ploeger, Jebish Purbey, Swati Rajwal, Selvan Sunitha Ravi, Sara Rydell, Roshan Santhosh, Drishti Sharma, Marjana Prifti Skenduli, Arshia Soltani Moakhar, Bardia Soltani Moakhar, Ran Tamir, Ayush Kumar Tarun, Azmine Toushik Wasi, Thenuka Ovin Weerasinghe, Serhan Yilmaz, Mike Zhang, Imanol Schlag, Marzieh Fadaee, Sara Hooker, and Antoine Bosselut. INCLUDE: evaluating multilingual language understanding with regional knowledge, 2024. URL https://doi.org/10.48550/arXiv.2411.19799." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.283, + 0.827, + 0.338 + ], + "angle": 0, + "content": "Eduardo Sánchez, Belen Alastruey, Christophe Ropers, Pontus Stenetorp, Mikel Artetxe, and Marta R. Costa-jussà. Linguini: A benchmark for language-agnostic linguistic reasoning. CoRR, abs/2409.12126, 2024. doi: 10.48550/ARXIV.2409.12126. URL https://doi.org/10.48550/arXiv.2409.12126." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.351, + 0.827, + 0.463 + ], + "angle": 0, + "content": "Priyanka Sen, Alham Fikri Aji, and Amir Saffari. Mintaka: A complex, natural, and multilingual dataset for end-to-end question answering. In Nicoletta Calzolari, Chu-Ren Huang, Hansaem Kim, James Pustejovsky, Leo Wanner, Key-Sun Choi, Pum-Mo Ryu, Hsin-Hsi Chen, Lucia Donatelli, Heng Ji, Sadao Kurohashi, Patrizia Paggio, Nianwen Xue, Seokhwan Kim, Younggyun Hahm, Zhong He, Tony Kyungil Lee, Enrico Santus, Francis Bond, and Seung-Hoon Na, editors, Proceedings of the 29th International Conference on Computational Linguistics, pages 1604-1619, Gyeongju, Republic of Korea, October 2022. International Committee on Computational Linguistics. URL https://aclanthology.org/2022.coling-1.138/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.474, + 0.827, + 0.545 + ], + "angle": 0, + "content": "Sheikh Shafayat, H Hasan, Minhajur Mahim, Rifki Putri, James Thorne, and Alice Oh. BEnQA: A question answering benchmark for Bengali and English. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1158-1177, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10. 18653/v1/2024-findings-acl.68. URL https://aclanthology.org/2024-findings-acl.68/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.556, + 0.827, + 0.626 + ], + "angle": 0, + "content": "Freda Shi, Mirac Suzgun, Markus Freitag, Xuezhi Wang, Suraj Srivats, Soroush Vosoughi, Hyung Won Chung, Yi Tay, Sebastian Ruder, Denny Zhou, Dipanjan Das, and Jason Wei. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.638, + 0.827, + 0.735 + ], + "angle": 0, + "content": "Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David Ifeoluwa Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, Enzo Ferrante, Marzieh Fadaee, Beyza Ermis, and Sara Hooker. Global MMLU: understanding and addressing cultural and linguistic biases in multilingual evaluation. CoRR, abs/2412.03304, 2024. doi: 10.48550/ARXIV.2412.03304. URL https://doi.org/10.48550/arXiv.2412.03304." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.748, + 0.825, + 0.775 + ], + "angle": 0, + "content": "Mistral AI team. Cheaper, better, faster, stronger, 2024. URL https://mistral.ai/news/mixtral-8x22b. Accessed: 4-Apr-2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.788, + 0.825, + 0.83 + ], + "angle": 0, + "content": "Aman Singh Thakur, Kartik Choudhary, Venkat Srinik Ramayapally, Sankaran Vaidyanathan, and Dieuwke Hupkes. Judging the judges: Evaluating alignment and vulnerabilities in Ilms-as-judges. CoRR, abs/2406.12624, 2024. URL https://doi.org/10.48550/arXiv.2406.12624." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.842, + 0.827, + 0.912 + ], + "angle": 0, + "content": "Lucas Weber, Elia Bruni, and Dieuwke Hupkes. 
Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning. In Jing Jiang, David Reitter, and Shumin Deng, editors, Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pages 294-313, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.conll-1.20. URL https://aclanthology.org/2023.conll-1.20/." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.162 + ], + "angle": 0, + "content": "Weihao Xuan, Rui Yang, Heli Qi, Qingcheng Zeng, Yunze Xiao, Yun Xing, Junjue Wang, Huitao Li, Xin Li, Kunyu Yu, Nan Liu, Qingyu Chen, Douglas Teodoro, Edison Marrese-Taylor, Shijian Lu, Yusuke Iwasawa, Yutaka Matsuo, and Irene Li. Mmlu-prox: A multilingual benchmark for advanced large language model evaluation. CoRR, abs/2503.10497, 2025. URL https://doi.org/10.48550/arXiv.2503.10497." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.17, + 0.828, + 0.269 + ], + "angle": 0, + "content": "Wenxuan Zhang, Mahani Aljunied, Chang Gao, Yew Ken Chia, and Lidong Bing. M3exam: A multilingual, multimodal, multilevel benchmark for examining large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine, editors, Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/117c5c8622b0d539f74f6d1fb082a2e9-Abstract-Datasets_and_Benchmarks.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.277, + 0.828, + 0.361 + ], + "angle": 0, + "content": "Yuan Zhang, Jason Baldridge, and Luheng He. 
PAWS: Paraphrase adversaries from word scrambling. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1298-1308, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1131. URL https://aclanthology.org/N19-1131/." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.442, + 0.106 + ], + "angle": 0, + "content": "A Additional dataset statistics" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.122, + 0.738, + 0.138 + ], + "angle": 0, + "content": "For reference, we provide a few dataset statistics beyond the main results in the paper." + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.147, + 0.787, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.323, + 0.825, + 0.366 + ], + "angle": 0, + "content": "Figure 8: Distribution of output types on the dev split. We show the normalised distribution of correct output types across languages, ordered (from bottom to top) by average frequency. Rare output types that occur only a few times are mapped to the category 'other'." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.47 + ], + "angle": 0, + "content": "Output type distribution In Figure 8, we show the per-language distribution of output types for MultiLoKo dev split.10 We mapped very rare output types, such as 'a quantity', 'a period of time' or 'letter' to 'other', for plotting purposes. We can see that name is the most common output type across languages, followed by the generic output type a word and number. 
Also place and date are relatively common output types, whereas most other output types occur very infrequently or only for a handful of languages." + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.487, + 0.773, + 0.773 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.779, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Figure 9: Average question and answer lengths. We show the per-question average length (in words) of the locally-sourced questions and answers, human-translated into English." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.825, + 0.884 + ], + "angle": 0, + "content": "Input and output length In addition to that, we show the average question – and output lengths of human-translated the locally sourced questions to English in Figure 9. While there is some variation in particular in question length, the lengths of the answers are relatively consistent. The average answer length is around 2, combining one-word answers with (usually) longer names." + }, + { + "type": "page_footnote", + "bbox": [ + 0.188, + 0.897, + 0.694, + 0.912 + ], + "angle": 0, + "content": "10Because the test split is blind, we do not report the distribution of output types here." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.387, + 0.108 + ], + "angle": 0, + "content": "B Instruction following" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.122, + 0.827, + 0.193 + ], + "angle": 0, + "content": "To facilitate evaluation, we instruct models to answer question with only a number/place/etc. Overall, we found that base models (with a five-shot template) are much better at abiding by this instruction than chat models, which exhibit a number of pathologies. While some of those can be caught with appropriate post-processing (see Appendix C, this is not the case for all issues. 
Below, we provide a summary of the main instruction-following issues we encountered with chat models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.202, + 0.825, + 0.232 + ], + "angle": 0, + "content": "False refusals Sometimes chat models refuse to provide an answer when the question is falsely perceived to be inappropriate (e.g. when the question asks about someone aged younger than 18)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.24, + 0.828, + 0.325 + ], + "angle": 0, + "content": "Producing full sentences Another issue we observed is that chat models would provide a full sentence answer, rather than a single word or phrase (e.g. Which year was Francisco Franco born? Produce a year only. - Francisco Franco was born in 1936). Such full-sentence answers make exact match rating impossible. The effect is not consistent across languages and happens only for some of the examples, without any discernable pattern, and therefore difficult to address completely with post-processing.[11]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.334, + 0.826, + 0.391 + ], + "angle": 0, + "content": "Spurious addition of \"answer is\" Likely due to overtraining on MMLU style tasks, Models such as OpenAI's GPT4 and Gemini 2.0 preface the vast majority of the answers in English with \"answer is\" or \"X answer is X\" where X is the desired correct response. This is remarkable, because it is essentially a repetition of the end of the prompt. However, it is easy to fix in post-processing." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.827, + 0.444 + ], + "angle": 0, + "content": "Japanese specific issues In Japanese, in general it is not polite to answer with incomplete sentences. As such chat models often append the copula verb \"desu\" to the answer, making exact match unsuccessful. We are able to fix this in postprocessing." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.452, + 0.826, + 0.537 + ], + "angle": 0, + "content": "Claude 3.5 Sonnet issues We were unable to make Claude 3.5 Sonnet follow the instructions to produce just an answer in English. It seemed to engage in a long chain-of-thought reasoning style response which we were unable to reliably parse. This issue only manifests in English and only with Claude. For this reason, we exclude Claude 3.5 Sonnet from our knowledge transfer results, as it would make the average lack of knowledge transfer from non-English languages to English more severe than they are." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.558, + 0.402, + 0.575 + ], + "angle": 0, + "content": "C Post-processing details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.59, + 0.825, + 0.619 + ], + "angle": 0, + "content": "We perform the following post-processing for both the reference answers and the answers produced by the model:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.631, + 0.508, + 0.646 + ], + "angle": 0, + "content": "- Remove leading and trailing whitespaces." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.652, + 0.375, + 0.667 + ], + "angle": 0, + "content": "- Remove punctuation." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.673, + 0.383, + 0.688 + ], + "angle": 0, + "content": "- Lowercase everything." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.631, + 0.508, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.7, + 0.67, + 0.715 + ], + "angle": 0, + "content": "We perform the following additional post-processing for pretrained models:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.727, + 0.771, + 0.742 + ], + "angle": 0, + "content": "- Remove leading \"Answer:\" or \"A:\" or the non-English equivalent from the output." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.748, + 0.509, + 0.762 + ], + "angle": 0, + "content": "- Remove everything after the first newline." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.727, + 0.771, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.774, + 0.672, + 0.79 + ], + "angle": 0, + "content": "We perform the following additional post-processing for postrained models:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.803, + 0.426, + 0.818 + ], + "angle": 0, + "content": "- Remove leading \"answer is:\"" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.824, + 0.825, + 0.853 + ], + "angle": 0, + "content": "- Detect the pattern \"X answer is X\", where X is the desired answer, and strip the unnecessary part in the middle." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.859, + 0.475, + 0.874 + ], + "angle": 0, + "content": "- Remove training \"desu\" in Japanese." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.803, + 0.825, + 0.874 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.912 + ], + "angle": 0, + "content": "11Using a judge-LLM may to some extent address this problem, but at the expense of other issues (e.g. Thakur et al., 2024)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.41, + 0.106 + ], + "angle": 0, + "content": "D Annotation instructions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.122, + 0.827, + 0.166 + ], + "angle": 0, + "content": "Our annotation pipeline contains five stages: 1) locality rating, 2) question generation 3) question review, 4) question answering, and 5) translation. Below, we provide the annotation instructions for each of these stages." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.182, + 0.321, + 0.198 + ], + "angle": 0, + "content": "D.1 Locality rating" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.208, + 0.828, + 0.32 + ], + "angle": 0, + "content": "To narrow-down the initial selection of paragraphs – sampled from the top-rated Wikipedia pages of the respective locales – the first step in our annotation pipeline is locality rating. Given a paragraph, we ask annotators to rate whether the paragraph is locally relevant to the particular locale, on a likertscale from 1 to 5, where 1 refers to extremely local and relatively obscure topics very specifically related to the specific language or locale and with little international recognition and 5 to globally well-known topics. We also ask annotators to disregard pages about inappropriate or politically sensitive topics. The rubric for locality annotation can be found in Table 3. We disregard everything with a locality rating of 3 or lower." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.334, + 0.825, + 0.748 + ], + "angle": 0, + "content": "
DescriptionExample
1.Extremely local and relatively obscure. Content that is of interest only to a small, localized group, such as a specific town, region, or community. These topics are typically obscure and not widely known beyond their immediate area.Local radio stations, small town historical events, regional businesses, or niche local cultural practices.
2.Regional interest. Topics that have some relevance beyond a specific locality but are still primarily of interest within a particular region or country.State or provincial politicians, regional cuisine, local sports teams, or medium-sized companies with regional influence.
3.National Significance. Content that is widely recognized within a single country, but relatively un-known internationally.National politicians (not internationally known), popular national media figures, major corporations within a country, or significant national historical events.
4.International recognition. Topics that are recognized and have relevance in multiple countries but may not be universally known across the globe. These topics often have international influence and are likely to be covered in international media, though their impact may vary by region.International brands which may be recognized in more than one country, celebrities with some international reach, significant cultural movements, or political conflicts with some awareness on the international stage.
5.Global prominence. Content that is widely recognized and relevant across a large number of countries around the world. These topics have a global impact or appeal and are likely to be well-represented in media across diverse cultures and regions.Globally famous celebrities (e.g., Cristiano Ronaldo), multinational corporations (e.g., Apple), major world events, or universally recognized cultural icons.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.75, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Table 3: Rubric for locality rating task. In the locality rating task, we ask the annotators to rate paragraphs with respect to how locally relevant the topic is to the locale." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.817, + 0.358, + 0.832 + ], + "angle": 0, + "content": "D.2 Question generation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.826, + 0.914 + ], + "angle": 0, + "content": "The second and main annotation step in our pipeline is the step in which we ask annotators to generate questions about sampled paragraphs. We ask annotators to generate a challenging question with a short answer. The answer should be easy to evaluate with string-matching metrics, the questions should not be open-ended or have many possible correct answers, be ambiguous or subjective, and the expected short answer should be concise. To ensure difficulty, we ask that answering the question" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.258 + ], + "angle": 0, + "content": "requires combining information from different parts in the accompanying text; It should not be answerable by mere regurgitation of a single sentence. We furthermore ask that the question is formulated such that its answer will not change over time (e.g. not 'How many medals has Sifan Hassan won', but 'How many medals has Sifan Hassan won between 2018 and 2022 (including)'), and that the question is answerable also without the article (e.g. not 'How many tv shows did the person in this article produce?'). To facilitate validation checks in the next round, we also ask that the question authors write a longer answer to explain how they arrived at the short answer. 
We also ask the question authors to annotate what is the type of the correct answer (e.g. number, name, date, etc) In the pilot, we observed that – for some languages – the vast majority of questions were questions that required some form of numerical reasoning. Because the intention of the benchmark is to address knowledge more than reasoning, we afterwards restricted the number of numerical questions to \\(10\\%\\). Similarly, we asked question authors to avoid yes/no questions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.274, + 0.331, + 0.289 + ], + "angle": 0, + "content": "D.3 Question review" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.299, + 0.825, + 0.342 + ], + "angle": 0, + "content": "In the first round of question review, we asked annotators from a different provider to judge whether the questions abide by the rules provided to the question authors. All question reviewers are native speakers. Specifically, we ask them to check if:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.352, + 0.546, + 0.366 + ], + "angle": 0, + "content": "- The question pertains to a locally relevant topic" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.37, + 0.621, + 0.385 + ], + "angle": 0, + "content": "- The question is clear and understandable, and not subjective" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.389, + 0.525, + 0.403 + ], + "angle": 0, + "content": "- The question has a clear and concise answer" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.407, + 0.825, + 0.435 + ], + "angle": 0, + "content": "- If there are multiple possible variations of the answer possible (e.g. 'Dick Schoof' / 'Minister Dick Schoof' / 'Prime Minister Dick Schoof' / etc), all versions of the answer are provided." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.44, + 0.575, + 0.454 + ], + "angle": 0, + "content": "- The question and answer are in the correct language" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.458, + 0.561, + 0.472 + ], + "angle": 0, + "content": "- The question is understandable without the article" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.476, + 0.701, + 0.491 + ], + "angle": 0, + "content": "- That the answer to the question will not likely change in the near future" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.352, + 0.825, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.825, + 0.585 + ], + "angle": 0, + "content": "When a question can be fixed with a minor change (e.g. add a time indication to make sure an answer will not change in the near future, or add an extra answer version), we ask the question reviewers to implement this fix and describe it. In the pilot round, we use the annotator feedback to finetune our annotation protocol and provide feedback to the question-authors. During the rest of the data collection, we simply disregard questions that are not useable as is or can be corrected with minor changes." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.601, + 0.488, + 0.617 + ], + "angle": 0, + "content": "D.4 Validation through question answering" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.827, + 0.78 + ], + "angle": 0, + "content": "In the last stage of our question generation pipeline, we have additional annotators answer the sourced and reviewed question. The goal of this validation task is to confirm that the questions are answerable, correct, non-ambiguous when read by individuals other than the original question author, and that all possible versions of the answers are included. For each question, we ask two additional annotators to first answer the question, using the snippets the questions were sourced from for context. 
After they have answered the question, they are shown the list of reference answers written by the original author of the question as well as the rational they provided, and we ask them to reflect upon the answer they gave themselves. If their answer did not match any answer in the original reference list, we ask them to either add their answer to the list if it is semantically equivalent to their own answer or indicate which answer they believe to be correct, their own or the original answer. We disregard all questions where at least one annotator disagrees with the original question author." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.798, + 0.322, + 0.814 + ], + "angle": 0, + "content": "E Related work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.825, + 0.913 + ], + "angle": 0, + "content": "In this paper, we introduce a new multilingual benchmark for LLMs, that we believe addresses gaps and pitfalls in existing benchmarks. We (concisely) outlined those gaps and pitfalls and mentioned several other works related to ours in the introduction of those paper. Here, we discuss multilingual evaluation of LLMs in more detail. Specifically, we discuss what datasets recent LLM releases have used for multilingual evaluation (Appendix E.1) and what other datasets and approaches they could have used but did not (Appendix E.2)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.177, + 0.089, + 0.825, + 0.276 + ], + "angle": 0, + "content": "
Claude 3.5 SonnetMGSM (Shi et al., 2023)
Gemini 2.0 FlashMentions multilingual audio, no multilingual benchmarks scores reported.
GPT4-oARC-Easy and TruthfulQA translated into five African languages (internal benchmark), Uhura-Eval (internal benchmark).
Llama 3.1MGSM (Shi et al., 2023), Multilingual MMLU (internal benchmark)
Mixtral 8x22Btranslated ARC-C, HellaSwag and MMLU (internal benchmarks)
Qwen2.5 72BM3Exam (Zhang et al., 2023), IndoMMLU (Koto et al., 2023), ruMMLU (Fenogenova et al., 2024), translated MMLU (Chen et al., 2023), Belebele (Bandarkar et al., 2024), XCOPA (Ponti et al., 2020), XWinograd (Muennighoff et al., 2023), XStoryClose (Lin et al., 2022), PAWS-X (Zhang et al., 2019), MGSM (Shi et al., 2023), Flores-101 (Goyal et al., 2022)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.28, + 0.825, + 0.325 + ], + "angle": 0, + "content": "Table 4: Multilingual evaluation of recent LLM releases, overview. We provide an overview table of the benchmark for which scores are reported in the release papers or notes of the LLMs we evaluated in this paper. Models are sorted alphabetically." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.359, + 0.523, + 0.374 + ], + "angle": 0, + "content": "E.1 Multilingual evaluation of LLMs in practice" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.384, + 0.825, + 0.509 + ], + "angle": 0, + "content": "While multilinguality is something frequently mentioned in the release papers or posts of recent LLM releases, the datasets for which they actual report scores is in most cases quite limited. Of the models that we evaluated for this paper, Gemini 2.0 Flash reported no multilingual scores at all; GPT4-o and Mixtral 8x22B report scores only on internally translated but not publicly available English benchmarks; Claude 3.5 Sonnet reports scores for only one benchmark - MGSM. MGSM is also the only publicly available benchmark for which Llama 3.1 reports scores, along with - also - an internally translated version of MMLU that is not publicly available. The only model that extensively reports multilingual benchmark values, on more than 10 benchmarks, is Qwen2.5 72B. We provide an overview of the multilingual benchmarks for which scores are reported for these models in Table 4." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.524, + 0.506, + 0.54 + ], + "angle": 0, + "content": "E.2 Multilingual evaluation options for LLMs" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.827, + 0.593 + ], + "angle": 0, + "content": "While, as we discuss below, there are gaps and challenges with multilingual evaluation for LLMs, there are in fact many more options than is suggested by what is reported in recent releases. 
Below, we discuss other options for multilingual LLM evaluation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.606, + 0.827, + 0.856 + ], + "angle": 0, + "content": "Translated English benchmarks As mentioned earlier on, benchmarks used for LLM evaluation are often translated English benchmarks. In some cases, the benchmarks were designed to evaluate only English and translated later, such as translated MMLU (e.g. Li et al., 2024; Chen et al., 2023; OpenAI, 2025; Singh et al., 2024) or MMLU-ProX (Xuan et al., 2025), MGSM (Shi et al., 2023) or MLAMA (Kassner et al., 2021). In other cases, the benchmark was multilingual at the time of its creation, but means of creation of the non-English data was through translating English sourced data, such as Belebele Bandarkar et al. (2024), Mintaka (Sen et al., 2022), or X-FACTR (Jiang et al., 2020). Taken together, translated benchmarks span quite a range of tasks, such as question answering (Artetxe et al., 2020; Lewis et al., 2020; Qi et al., 2023; Ohmer et al., 2023), natural language inference (Conneau et al., 2018), paraphrase detection (Zhang et al., 2019), general linguistic competence (Jumelet et al., 2025), reading comprehension (Artetxe et al., 2020; Bandarkar et al., 2024) and commonsense reasoning (Ponti et al., 2020), and even instruction following (He et al., 2024). With the exception of question answering and of course instruction following, however, many of these tasks have gone (somewhat) out of fashion for LLM evaluation, a trend which is mirrored also in the usage of their multilingual counterparts. As mentioned before, translated benchmarks have the advantage of containing parallel data, allowing for some form of comparability across languages, but are English-centric in content and may suffer from translationese (see e.g. Romanou et al., 2024; Chen et al., 2024, for a recent discussion of this)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Multilingual benchmarks sourced from scratch Though much rarer, there are also benchmarks that are created independently for each language they include. Clark et al. (2020) release a question answering dataset separately sourced for 11 different languages, with a protocol relatively similar" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.827, + 0.244 + ], + "angle": 0, + "content": "to ours. In a different category, Hardalov et al. (2020), Zhang et al. (2023) and Romanou et al. (2024) and Sánchez et al. (2024) do not create benchmark data, but instead collect existing exam or competition questions from official human exams. In case of Zhang et al. (2023), the exams are graduation exams of primary, middle and high school; Hardalov et al. (2020) includes official state exams taken by graduating high school students, which may contain parallel pairs in case countries allow examinations to be taken in multiple languages; Romanou et al. (2024), cover academic exams at middle and high school and university level, professional certifications and licenses, and exams to obtain regional licenses. Sánchez et al. (2024) instead focus on questions from the International Linguistic Olympiad corpus. Lastly, as part of their study Ohmer et al. (2023) create a dataset called SIMPLE FACTS, containing factual questions created through a shared template filled in with language specific factual data." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.259, + 0.828, + 0.398 + ], + "angle": 0, + "content": "Consistency evaluation A rather different approach to assess multilinguality in LLMs is to focus not on accuracy across different languages, but to consider whether predictions are consistent across languages. 
This tests knowledge and skill transfer between languages more explicitly. Two recent examples of studies incorporating consistency-based evaluations on factual knowledge questions are Qi et al. (2023) and Ohmer et al. (2023). Qi et al. (2023) focuseses specifically on sample-level consistency of answers across different languages, requiring existing parallel benchmarks. Ohmer et al. (2023), instead, ask models to translate benchmark questions themselves before answering them again. This can, with some caveats, be applied to any existing monolingual benchmark, but – requiring multiple steps – it is more involved as a paradigm, and is somewhat bottlenecked by the translation ability of the model to be evaluated." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.825, + 0.455 + ], + "angle": 0, + "content": "Translation as a proxy for multilinguality Another, more implicit method to assess multilinguality in LLMs is to evaluate their ability to translate from one language to another. This approach was famously used by Brown et al. (2020), but has not been common since." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.469, + 0.828, + 0.622 + ], + "angle": 0, + "content": "Monolingual non-English evaluation In our discussion, we have focussed on multilingual evaluation options that cover multiple other languages. After all, a benchmark to evaluate models on Bengali (e.g. Shafayat et al., 2024) or Arabic (e.g. Alwajih et al., 2024) can contribute to multilingual evaluation when combined with other benchmarks, but does not so on its own. Because such benchmarks are usually created by language experts for the respective languages, they usually target locally relevant skills and knowledge and are likely of higher quality than benchmarks created for many languages simultaneously (either through translation or from scratch). Yet, composing a suite including many languages that allows direct comparisons between languages remains challenging. 
We believe such benchmarks can be important for multilingual evaluation in LLMs, but will not further discuss benchmarks focussing on individual languages or very small sets of languages within one family here." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "27" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_origin.pdf b/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0c185764119b73a9a50504f981750bf53e29ead8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/15a8c18d-57d9-46bd-bbe1-f5ca7eeeb023_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c52fc7232e34f1a190815c01170c1446895dacc5eeecb0e633287149a1cadafc +size 570490 diff --git a/data/2025/2504_10xxx/2504.10356/full.md b/data/2025/2504_10xxx/2504.10356/full.md new file mode 100644 index 0000000000000000000000000000000000000000..18fb2d1a4278c260e6f8d2a5084aa18c0bf4b547 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/full.md @@ -0,0 +1,444 @@ +# MultiLoKo: a multilingual local knowledge benchmark for LLMs spanning 31 languages + +Dieuwke Hupkes* Nikolay Bogoychev* + +Meta + +{dieuwkehupkes,nbogoych}@meta.com + +# Abstract + +We present MultiLoKo, a new benchmark for evaluating multilinguality in LLMs covering 31 languages. MultiLoKo consists of three partitions: a main partition consisting of 500 questions per language, separately sourced to be locally relevant to the specific language, and two translated partitions, containing human-authored translations from 30 non-English languages to English and vice versa. For comparison, we also release corresponding machine-authored translations. The data is equally distributed over two splits: a dev split and a blind, out-of-distribution test split. 
MultiLoKo can be used to study a variety of questions regarding the multilinguality of LLMs as well as meta-questions about multilingual benchmark creation. We compute MultiLoKo scores for 11 base and chat models marketed to be multilingual and study their average performance, their performance parity across languages, how much their ability to answer questions depends on the question language, and which languages are most difficult. None of the models we studied performs well on MultiLoKo, as indicated by low average scores as well as large differences between the best and worst scoring languages. Furthermore, we find a substantial effect of the question language, indicating suboptimal knowledge transfer between languages. Lastly, we find that using local vs English-translated data can result in differences more than 20 points for the best performing models, drastically change the estimated difficulty of some languages. For using machines instead of human translations, we find a weaker effect on ordering of language difficulty, a larger difference in model rankings, and a substantial drop in estimated performance for all models. + +# 1 Introduction + +With the growing presence and deployment of LLMs across the world, evaluating their abilities in languages other than English becomes more and more eminent. Yet, studying and evaluating multilinguality in LLMs remains a challenging enterprise, and it is hardly exaggerated to call the current state of multilingual evaluation in LLMs insufficient. Older multilingual benchmarks such as PAWS-X (Zhang et al., 2019), XNLI (Conneau et al., 2018) or XCOPA (Ponti et al., 2020) often do not fit the demands for evaluating auto-regressive models and are rarely used to evaluate recent models. Furthermore, their coverage of languages is relatively small compared to the number of languages in which LLMs are intended to be proficient. 
+ +More often used for LLM evaluation are benchmarks translated from English, such as MGSM (translated GSM8K, Shi et al., 2023), MMMLU (tranlated MMLU, OpenAI, 2025) or (less frequently) Belebele (Bandarkar et al., 2024). These benchmarks provide good coverage over many languages, but using translated data comes with its own set of issues. One such issue is that even when human-rather than machine-authored translations are used, translated data is known to differ from native text in several ways (Clark et al., 2020). Furthermore, using translated benchmarks imposes a strong + +English-centric bias: translated data may be multilingual on the surface, it is not in its content. The benchmarks MLQA (Lewis et al., 2020) and TidyQA (Clark et al., 2020) to some extent address the issue by sourcing data separately for different languages. Even in their sourcing protocols, however, there is no explicit focus on selecting locally relevant content for the chosen languages. In addition to that, their coverage is again small compared to the above mentioned translated benchmarks. + +In response to these issues, we introduce a wide-coverage multilingual benchmark with locally-sourced questions for 31 different languages. Because the benchmark targets multilingual local knowledge, we dub it MultiLoKo. The release of MultiLoKo serves two interconnected goals: + +1) Provide a better means to evaluate multilinguality in LLMs; +2) Provide data to study the effect of various design choices in multilingual evaluation. + +To address our first goal, we create 500 questions per language, written from scratch for each language, using a sourcing protocol specifically designed to ensure local relevance of the question topics. To also reap the benefits of parallel data, we commissioned both human and machine-authored translations for all non-English questions into English and vice versa, providing a total of 15500 parallel questions, sourced across the 31 languages in the benchmark. 
The translated data facilitates the study of transfer between languages and also serves our second goal. By comparing the English-translated data with the locally sourced data, we can explicitly compare the adequacy of using translated benchmarks; by comparing human- with machine-authored translations, we can better estimate the potential issues of the latter. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while test scores can only be obtained through an external provider.3 + +We provide elaborate analyses for both our goals. We compute average performance and language parity scores on the locally sourced data for 11 models marketed for their multilinguality (§ 5.1); we investigate whether these models exhibit knowledge transfer between different languages (§ 5.2); we study the impact of local sourcing versus translating on model rankings and language difficulty (§ 5.4.1); we analyse the difficulty of the included languages through various lenses (§ 5.3); and we conduct an analysis into the difference between human- and machine-authored translation (§ 5.4.3). + +We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language. Llama 3.1 405B and GPT4-o are close contenders in terms of average scores (34.3 and 34.0, respectively), but both have substantially higher language gaps (39 and 49 points). Almost across the board, model performances are better when questions are asked in the language to which the content is relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low response-consistency across question language. + +Next, we study the relevance of using locally sourced data as opposed to translated English data as well as whether it matters if translations are authored by humans or machines. 
We find that the estimated difficulty of some languages changes drastically across the two sourcing setups, within the range of 15 points decrease and 8 points increase on average across models. The rank correlation between average language difficulty score is 0.78. Furthermore, individual model scores between locally and English-translated data can differ up to 22 points for some languages. However, changing the sourcing setup does not impact model rankings, suggesting that using translated data may be suitable for comparing models but less for model development or language prioritisation. For using machine- instead of human-authored translations, as well, the effect on model ranking is limited $(R = 0.97)$ , but the difficulty estimates of various languages changes with up to 12 points. Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to $34\%$ of the human-translated scores. + +Outline In the remainder of this paper, we first describe our dataset collection protocol and the dataset itself in § 2 and § 3, respectively. In § 4, we describe our experimental setup. In § 5, we present a range of different results, covering (among other things), the summary of results described above. We conclude in § 6. As we discussed quite some related work above, we do not include a separate related work section in the main paper, but we do provide a discussion of a wider range of multilingual datasets in Appendix E. + +# 2 Dataset collection + +The main data collection protocol of MultiLoKo is similar to the protocol used by the well-known benchmark SQuAD (Rajpurkar et al., 2016): we source articles from Wikipedia and ask annotators to generate questions about paragraphs sampled from these articles. After that, we run several rounds of quality control on the generated questions and commission human- and machine-authored translations of all data. Our collection protocol consists of five steps. 
+ +Step 1: Paragraph selection The first step in our protocol is the sampling of the 6K most visited Wikipedia pages for each language for the period of 2016-2021. We sample paragraphs from those pages by randomly selecting a word in the page and expanding left and right until we reach 3K characters. Next, we ask annotators to judge the local relevance of the samples on a scale from 1 to 5, where 1 refers to topics specific to the language (e.g. a Swedish singer not known outside of Sweden) and 5 to globally well-known topics (e.g. 'Youtube'). We disregard all topics that have a locality score above 3. The full rubric and annotation instructions can be found in Appendix D.1. + +Step 2: Question generation In step 2, we ask native speakers to generate challenging questions about the content in the paragraphs. To facilitate automatic scoring, we ask that the questions are closed-form questions, with only one correct short answer. To ensure that the annotation instructions are understandable and appropriate for each locale and the questions of high quality, we run a pilot with 50 questions separately for each language. After our pilot, we commission 500 additional samples for each language, to leave a $10\%$ margin to disregard questions in the rest of the process. + +Step 3: Question review For each generated question, we ask a new set of annotators from a separate provider to judge whether the generated questions abide by the annotation instructions, to flag any possible issues, and to mark if the question is useable as is, would be useable with a small adaptation or should be disregarded. We ask annotators to fix small annotators on the spot, and as respective vendors that questions with larger issues are replaced. + +Step 4: Question answering As a last quality control step, we ask two annotators different from the creator of the question to answer the questions. 
In this stage, we do not ask annotators to correct questions, but we simply disregard all questions for which either annotator thinks the original answer was incorrect, or the annotator provided an answer not matching the original answer because of ambiguities in the question. The only corrections we allow in this stage are additions of additional, semantically equivalent, correct answers (e.g. 'four' as an alternative to '4'). + +Step 5: Translation Lastly, we translate the non-English data back to English and vice versa. This effort serves two purposes. First, it allows to study generalisation of knowledge and skills between English and non-English languages through a direct comparison of the same questions. Second, it facilitates inspection of the topics and questions for all languages of the dataset, without the need to be able to speak all those languages. As automatic translation of benchmarks is relatively common practice in the field (e.g. Li et al., 2024), we commission both human and machine translations and study their difference as part of our analysis. For the machine translations, we use Google Translate sentence based cloud API. $^{4}$ + +# 3 MultiLoKo the dataset + +MultiLoKo consists of three main components: i) the collected data; ii) a set of multilingual prompts to prompt base- and chat models; and iii) a set of metrics. + +# 3.1 The collected data + +The data in MultiLoKo consists of several partitions and two splits. + +Partitions MultiLoKo contains one main partition, containing locally-soured data for 31 languages, including English. In addition to that, it contains four translated partitions. Two of those are human-translated partitions: human-translated-from-english, consisting of human-authored translations of English data into the 30 other languages in MultiLoKo, + +human-translated-to-english containing human-authored translations of the non-English subsets into English. 
The other two are machine-translated partitions following the same pattern: machine-translated-from- english, contains machine-authored translations of English data into 30 other languages, and machine-translated-to- english contains machine-authored translations of the non-English subsets into English. All partitions contain 500 samples per language - thus in total 15500 samples in the main partition, and 15000 samples in the translated partitions. Statistics about the dataset such as the distribution over answer types and the average prompt length can be found in Appendix A. Results relating to the difficulty of the benchmark can be found in $\S 5$ . + +Splits Each partition is divided equally over two splits: a dev split that can be used for development, and a blind test split. Each of these splits thus contains 250 samples per language. Until the test split is publicly released, results can only be obtained through model submissions. The splits are not random, but constructed such that for each language the most frequently visited pages are in the dev split while the least frequently visited pages are in the test split, roughly preserving the distribution of answer types (e.g. number, name, year, etc). The test split can thus be seen as an out-of-distribution (ood) split, specifically meant to assess generalisation (which is challenging in the context of LLMs, see e.g. Hupkes et al., 2023). In § 5.4.2 we provide an analysis of the extent to which the split is truly an ood split, by analysing its difficulty. The results reported in the results section of the paper are dev results. + +# 3.2 Prompts and few-shot examples + +Running MultiLoKo requires prompts. In the spirit of getting truly multilingually appropriate results, we design prompts separately for each language and release them along with the data. 
The prompts are written by different linguistic experts for the various languages, in consultation with the benchmark creators to ensure they are appropriate for LLMs. We provide prompts for base models and chat models that allow for incorporating up to five few-shot examples, which we also provide.6 All prompts and few-shot examples can be found in the MultiLoKo repository. + +# 3.3 Metrics + +MultiLoKo has two main metrics and two auxiliary metrics. The two main metrics - Exact Match accuracy (EM) and Gap - capture the overall performance of MultiLoKo and are computed on the main partition, whereas the two auxiliary metrics - Mother Tongue Effect (MTE) and Locality Effect (LE) - combine information from different partitions. We provide a cheat-sheet in Table 1. + +EM and Gap EM indicates the performance of a model on a single language or averaged across languages, as measured by the percentage of times the model (after post-processing) provides an answer that verbatim matches one of the answers in the reference list. Gap, defined as the difference between the best and the worst performing language in the benchmark, is a measure of parity across the individual languages within the benchmark. Taken together, EM and Gap provide a good indication of how well a model is faring on MultiLoKo. Because both gap and EM are binary metrics that may be open to false negatives, we also considered the partial match metrics BLEU (Papineni et al., 2002), ChrF (Popovic, 2015) and contains. We did not find any novel patterns using those metrics, but include them in our implementation for future research. + +MTE Because of the 2x2 design of MultiLoKo, in which we translated non-English data back to English and vice versa, we can compute several metrics related to locality of the requested information. MTE is one of such metrics. It expresses the impact of asking a question in a language to which that question is relevant. 
We quantify MTE (for non-English languages only), as the difference between the EM score of the locally sourced data asked in the corresponding language (e.g. asking a question about a local Bengali radio station in Bengali) and the EM score when the same questions are asked in English. A positive MTE indicates that information is more readily available when it is relevant to the language in which it was asked, whereas a negative MTE indicates that the information is more easily accessible in English. MTE is a measure related to transfer as well as language proficiency. + +5More details can be found at https://github.com/facebookresearch/multiloko/. + +6In several recent works it has been shown that prompts can have a substantial impact on model scores (e.g. Weber et al., 2023; Mizrahi et al., 2024). Given the large number of languages in the benchmark and the fact that those are not all mastered by the main authors, we did not include a systematic search through prompts, but presented our best-effort results. + +
Average EMThe first main metric we use to quantify performance for MultiLoKo is the average Exact Match score across languages, which expresses how many of the answers match one of the gold standard answers verbatim (after post-processing the answers).
GapThe second main metric is the gap between a model's best and worst performing language. We use the gap to quantify the extent to which a model has achieved parity across languages. Because a small gap can be achieved both through parity on high scores and through parity on low scores, it is most informative in combination with average benchmark performance.
Mother tongue effect (MTE)MTE expresses the impact of asking questions in a language in which the requested information is locally salient, compared to asking it in English. A positive MTE indicates information is more readily available in the language in which it was (likely) present in the training data, whereas a negative mother tongue effect indicates the information is more easily accessible in English.
Locality effect (LE)LE quantifies the effect of using locally sourced vs translated data. It is measured by computing the difference between scores for locally sourced data and translated English-sourced data. A positive LE implies that using translated English data underestimates performance on a language, a negative LE that using translated English data overestimates performance.
+ +Table 1: MultiLoKo metric cheatsheet. We use several metrics to quantify model performance using MultiLoKo. This table provides a cheatsheet for their meaning. + +LE The locality effect (LE) is a measure of how much performance on knowledge tasks is over- or underestimated through the use of using translated English data, as opposed to locally relevant data. We quantify the locality effect as the difference in EM for English translated data and locally sourced data. If for a language the English translated data has as a higher EM, the LE is positive, indicating that using English translated data likely overestimating a model's ability on providing knowledge for that language. If the LE is negative the English translated data may provide an underestimation of the score for that language. Note that because we often observe both positive and negative LEs for the 30 non-English languages in MultiLoKo, the average LE across languages may be small, even if the differences for individual languages may be large. + +# 4 Experimental setup + +We test and showcase our benchmark by running experiments with 11 different models of varying sizes, that were all marketed to have multilingual abilities. + +# 4.1 Models + +To test the extent to which MultiLoKo provides useful signal across training stages, we consider both base and chat models. The base models we include in our experiments are Llama 3.1 70B and 405B (Dubey et al., 2024), Mixtral 8x22B (team, 2024), and Qwen 2.5 72B (Qwen et al., 2025), the seven chat models are Gemini 2.0 Flash (Google DeepMind, 2024), GPT4-o (OpenAI et al., 2024), Claude 3.5 Sonnet (Anthropic, 2025), Llama 3.1 70B and 405B Chat, Mixtral 8x22B-it, and Qwen 2.5 72B instruct. As mentioned before, we run chat and base models with separate prompts. + +# 4.2 Experimental setup + +We run all of our experiments with the generation temperature set to 0. 
To facilitate automatic evaluation, we include an instruction to answer questions curtly and precisely, producing only a number/name/location/etc. Full template information can be found in our github repository. + +Few-shot prompting For base models we use a 5-shot prompt. For chat models, we use a 0-shot prompt, as this is the most likely use mode by chat model users. + +Post-processing Because base models are good at following the instructions, minimal postprocessing is needed: we only lowercase the output and strip punctuation. Chat models often deviate from the required format, especially in English, in various ways that we discuss in Appendix B. To evaluate such models beyond their instruction-following issues, we perform more complex post-processing, aiming to remove any words resembling "answer" from the LLM output, as well as several special cases for English and Japanese. We provide full details about post-processing in Appendix C. + +
ModelEMGapMother tongue effectLocality effect
Gemini 2.0 Flash34.39± 2.9034.806.12± 1.900.36± 3.40
Llama 3.1 405B34.31± 2.7039.206.37± 1.700.62± 2.70
GPT4-o33.97± 3.6048.803.08± 2.000.35± 2.90
Llama 3.1 405B Chat27.70± 3.2040.803.97± 2.20-1.11± 2.70
Llama 3.1 70B26.92± 2.6028.802.72± 1.70-0.30± 3.10
Claude 3.5 Sonnet26.89± 4.4047.6024.18± 4.200.81± 2.90
Llama 3.1 70B Chat21.65± 2.8042.400.49± 1.60-3.32± 3.30
Mixtral 8x22B21.64± 4.2043.60-2.18± 3.00-0.65± 2.60
Qwen2.5 72B19.66± 2.3028.402.45± 2.10-2.28± 2.70
Mixtral 8x22B-it10.10± 3.1039.20-5.41± 2.00-0.54± 1.70
Qwen2.5 72B instruct2.54± 0.708.00-1.52± 1.000.43± 0.70
+ +Table 2: Aggregate results dev. We report average EM, gap, mother tongue effect and locality effect for all 11 models on the MultiLoKo dev split. For EM, MTE and LE, we also indicate a confidence interval equal to two times the standard error across languages. Models are sorted by average EM. + +# 5 Results + +As MultiLoKo has several partitions, there are many different results that can be computed. On a high level, we consider four different types of results. First, in § 5.1, we report average model results across several categories, including the average performance and an indicator of parity across languages. Next, in § 5.2, we dive deeper into the knowledge transfer occurring from one language to another, within individual models. In § 5.3, instead, we focus on differences between individual languages. Lastly, in § 5.4, we look in more detail at the dataset itself through the lens of model results, considering in particular the effect of locally sourcing data as opposed to translating English sourced data (§ 5.4.1), differences between our dev and test split (§ 5.4.2) and the difference between using human and machine translated data (§ 5.4.3). + +# 5.1 Aggregate results: EM and language gap + +In Table 2, we provide a summary of the average dev results. Specifically, for each model, we report average EM and the gap between the best and the worst language, along with average MTE and LE, which we will discuss in a later section.7 We report average MTE, EM and LE along with a confidence interval equal to two times the standard error across languages, roughly equalling previously used $95\%$ confidence intervals (Madaan et al., 2024; Dubey et al., 2024). + +# 5.1.1 Model performance (EM) + +In Figure 1a, we show a boxplot of the distribution of the EM scores across models, ordered by average EM. The best performing models are Gemini 2.0 Flash, Llama 3.1 405B, and GPT4-o, while Mixtras 8x22B and the Qwen2.5 72B populate the lower rankings on the list. 
Somewhat surprisingly, base models are generally outperforming chat models on the benchmark, this is partly due to false refusals and poor instruction following in the chat models. In some cases, however, the chat models simply just provide a qualitatively different answer than the base models. The figure shows that MultiLoKo is a relatively difficult benchmark across the board: the average EM of even the best performing model barely exceeds 30, while the bottom performing models have EM scores lower than 20. Also scores for the easiest languages (see also § 5.3) are capped below 50. Furthermore, for virtually all models performance varies starkly between languages, suggesting that none of the models we considered are evenly multilingual across the 31 languages in MultiLoKo. + +# 5.1.2 Gap + +While average EM score provides some information about a model's multilingual abilities, the same EM score can hide many different patterns regarding individual language scores. As we appreciate it is not always practical to consider 31 separate EM scores in model development, we add a second + +![](images/ba1c71502e3d1b62d80b80df10464eeef839823bfd41082afe99302cc0c1fa68.jpg) +(a) EM scores + +![](images/cc7d7f7d7a0ea73f1bd888448d909a38821f0cdcc30c4807be351475caeba83c.jpg) +(b) Gap +Figure 1: EM distributions and Gap dev. (a) Boxplot of observed EM scores for each model, sorted by mean. (b) Difference between the best EM and the worst of the N next best EM scores, per model. + +summary metric to the main metrics of MultiLoKo: the gap between the best and worst performing languages, representative of the extent to which a model has achieved parity across languages. + +In Figure 1a, we already saw that the per-language scores have quite a range for all models. In Figure 1b, we study this in more detail, by considering the gap between the best language and the next N best language (30 corresponds to the full benchmark). 
On the right end of the plot, we see that already considering only 5 languages besides English, even the best performer has a gap of over five points - relatively large in absolute terms, very large in relative ones - between English and the worst of the remaining languages. For the two next-best models, the top-5 gap even exceeds 10 points. As we include more languages, up to the full benchmark, the gap increases, with GPT4-o showing a gap of almost 50 points. The only models for which the gap is small are the models that have overall low performance and thus little space to drop from English, illustrating how gap and average EM provide complementary information about multilingual performance.

# 5.2 Generalisation across languages

Next, we study whether knowledge generalises across languages or, in other words, whether knowledge transfers from one language to another.

# 5.2.1 The mother tongue effect (MTE)

First, we compare the EM of models when questions are asked in the language for which the questions were originally sourced with performance when the same questions are asked in English. We quantify this effect with the metric MTE, which expresses the difference in performance between these two settings (see § 3.3). In Figure 2a, we show MTE per language, averaged across models. For most languages, performance is higher when the question is asked in the language for which the question is locally relevant. The languages for which MTE is negative or close to 0 are virtually all languages that perform very poorly also in the mother tongue and for which there is therefore little room for further decrease. From one perspective, the improvements when questions are asked in the low-resource but native languages can be seen as surprising: as models generally perform much better in English than in non-English languages, one may instead expect performance to go up when the questions are asked in English. On the other hand, similar 'mother tongue effects' have been observed in earlier studies. 
For example, Ohmer et al. (2024) found that models are comparatively better at answering factual questions when the questions are asked in the language of the culture to which the fact pertains. It appears that also in our case,

![](images/5e9488ee42bcffe9751990decbf76ca92024b4aaa74c5ac9b05c6f80f75e0642.jpg)
(a) Average MTE across models
(b) KDE of MTE scores

![](images/02b5749be8a0b0cad9681d40427e51c88c0bc0e81cda587783d142cb140bf240.jpg)
Figure 2: Mother tongue effect dev. (a) Per language MTE for MultiLoKo dev, indicating the difference between questions asked in the mother tongue (locally relevant) and in English. Error bars indicate 2 times standard error across all models, excluding Claude 3.5 Sonnet. (b) KDE plot of the distribution of MTE scores for the top-3 performing models.

the effect of accessibility of information in a relevant language wins out over the generally stronger English performance, pointing to a gap in models' ability to generalise knowledge from one language to another.

In Figure 2b, we further consider the distribution of MTE scores for the top-3 models. Interestingly, this distribution is quite different between models. Despite having comparable average scores, the top-3 performing models differ in their MTE distributions across languages. Of the three models, GPT4-o has the smallest average effect (3.2); Llama 3.1 405B has a much higher average effect (6.6), but less probability mass on the more extreme ranges of the spectrum (min-max values of $[-7, +12]$ vs $[-9, +13]$). Gemini 2.0 Flash is in the middle in terms of average (6.3), but shows the largest variation across languages $[-10, +16]$ . 
+ +Note, however, that without studying the actual training data of the various models, it is possible to infer that all these models have relatively poor transfer across languages, but not conclusively say that one model is better than another: it is also possible that the information sourced for languages with better MTEs was simply better represented in the English data of a respective model. + +# 5.2.2 Consistency across responses + +Another way to study transfer between languages is to look at the consistency of responses across languages (previously also used by Qi et al., 2023; Ohmer et al., 2023, i.a.). After all, it is possible for a model that has an EM of 30 on both English and another language to be nevertheless completely misaligned on which questions they respond to correctly. Studying consistency across responses can therefore be seen as a more direct way of studying whether knowledge is equally accessible across languages. Furthermore, consistency can be studied independently from accuracy, as it is possible for a model to have very good transfer, but be simply consistently wrong. + +In the dataset used by Ohmer et al. (2023), the correct answers (consisting of names, numbers and years) are identical across the languages they consider, while Qi et al. (2023) use a factual knowledge task that requires ranking outputs. Neither of their metrics can thus be directly applied in our case. Specifically, measuring consistency on incorrect responses – an important component of the work of Ohmer et al. (2023) because it can provide positive rather than negative evidence – would require assessing whether two answers in different languages are to be considered semantically equivalent, which is not practically feasible for our data. Rather, we opt for a simpler consistency metric, which quantifies what percentage of the questions that are answered correctly in either language are answered correctly in both languages. 
+ +In Figure 3a, we show the average consistency of all models (excluding again Claude Sonnet 3.5); for completeness, we also show the per-language consistency results in Figure 3b. The results confirm our earlier conclusion that much improvements can be made when it comes to knowledge transfer + +
ModelConsistency
Gemini 2.0 Flash0.46± 0.04
Llama 3.1 405B0.46± 0.04
Llama 3.1 70B0.45± 0.03
GPT4-o0.45± 0.05
Llama 3.1 405B Chat0.42± 0.04
Qwen2.5 72B0.40± 0.04
Llama 3.1 70B Chat0.40± 0.04
Mixtral 8x22B0.36± 0.05
Mixtral 8x22B-it0.21± 0.05
Qwen2.5 72B instruct0.08± 0.03
+ +(a) Consistency scores per model + +![](images/4292a85211fc2019ae5f9eb0d70959896e6e3c18041a561b5072bafe1218ace8.jpg) +(b) Consistency scores per language +Figure 3: Consistency results dev. (a) Average per-model consistency scores, $\pm 2$ times the standard error across languages. (b) Boxplot of model consistency scores per language, indicating the relative overlap of correctly answered questions when asked in the mother tongue vs in English. + +between languages: even for the best performing models, there is an overlap of not even $50\%$ between the questions correctly answered across languages. + +# 5.3 Differences between languages + +So far, with the exception of MTE and parity scores, we have primarily looked at results averaged across languages. Now, we consider language-specific results in a bit more detail. + +![](images/5a773333e4ab35574de8f34ba1e9ec4e4d7fb66080d19020a6de0b49c9e62b74.jpg) +Figure 4: Average EM per language dev, in mother tongue and English. Top: Average EM on locally sourced data. Bottom: Average EM on locally sourced data, translated to English. + +# 5.3.1 Language difficulty on locally sourced data + +First, in Figure 4 (top), we show average model results for all languages on all locally sourced data. In broad strokes, the order of difficulty is correlated with how low- or high- resource a language is to be considered: while languages such as French, English and Spanish occur at the easier end of the spectrum, we find Farsi, Khmer and Malay among the most difficult languages. There are a few + +![](images/e5f1a6301f9eacb88d277334073c9d3bc06a6bceaf152447d2047710024fbe06.jpg) +(a) Locality effect per language +Figure 5: Locality Effect dev. (a) Per language Locality Effect, indicating the difference in assigned scores between locally sourced and translated English data. A positive LE means the locally sourced data has a higher score (is easier), a negative LE the English sourced data has a higher score. 
(b) Per-model rank correlation between language difficulty of languages on locally sourced vs English translated data. + +
ModelRank correlation language difficulty
Gemini 2.0 Flash0.54
Llama 3.1 405B0.65
GPT4-o0.64
Llama 3.1 405B Chat0.70
Llama 3.1 70B0.60
Claude 3.5 Sonnet0.84
Llama 3.1 70B Chat0.68
Mixtral 8x22B0.86
Qwen2.5 72B0.45
Mixtral 8x22B-it0.88
Qwen2.5 72B instruct0.55
+ +(b) Language difficulty correlations + +notable exceptions: on average the second highest scoring language in our benchmark is Tagalog. While it is difficult to judge why without doing a detailed analysis on the questions, we hypothesise that the questions asked by the Tagalog language experts are simply less complex than the questions of other languages. + +# 5.3.2 Separating language difficulty from language proficiency + +In an attempt to distinguish data difficulty from language proficiency, we consider also the difficulty of the locally sourced data translated to English. While this conflates data difficulty and transfer (see § 5.2), it still gives us some indication of the extent to which low performance in languages is caused by poor language proficiency versus data difficulty. In the bottom half of Figure 4, we show the model performances as computed on the locally sourced data translated to English. The correlation between these two language difficulty rankings between these setups is 0.79. When comparing the ranks of the various languages, only a handful of languages shift more than a few places. Specifically, Bengali $(26->4)$ , Urdu $(26->12)$ , and Hindi $(14->5)$ all decrease substantially in difficulty rank. The fact that they are comparatively easier in English suggests that for those languages proficiency may be a larger problem than data difficulty. On the other hand, only Russian $(7->21)$ shows a drop of more than 5 places. + +# 5.4 The dataset + +Lastly, we discuss two aspects related to the creation of the dataset. Specifically, we consider the impact of local sourcing vs translated English data, and we have a look at the dataset split across dev and test. We consider the difference between using human-authored as opposed to machine-authored translations. 
+ +# 5.4.1 Locally-sourced vs translated-from-English data + +To study the impact of using locally sourced data, we consider the difference between per-language EM on locally sourced data and translated from English data. + +Language difficulty First, we look at per-language differences between locally sourced and translated English data. We quantify this difference in a metric we call the Locality Effect (LE). The size of the locality effect tells us how much the estimate of a model's strength in a particular language would have been off if we had chosen to use a translated benchmark rather than a locally sourced one. We plot this difference in Figure 5a. + +As we can see, the scores between locally and translated English-sourced data can differ quite drastically, almost 15 percentage points averaged across models. For individual models, the differences are + +even larger. For Llama 3.1 405B, the locality effect ranges from -13 to +17; for Gemini 2.0 Flash from -21 to +15; and for GPT4-o from -22 to +14. The differences are not just in absolute scores; also the ordering of language by difficulty is quite different across the two data collection setups, as can be seen by the per-model rank correlations of language difficulty between the two conditions, shown in Figure 5b. Using English-translated rather than locally sourced data does thus not only provide different estimates, but may suggest different languages to focus on for improvement. + +Model rankings Next, we consider the ranking of the models under the two different data regimes. Interestingly, given the transfer effect, changing from locally to English translated data does not make any difference in the ranking. Also in terms of absolute scores, the difference between the two data collection setups is relatively minor. 
At least for our type of data, it thus appears that using translated data as opposed to locally sourced data may be a reasonable setup for comparing models on average, though not for getting adequate per-language estimates or for setting language prioritisation.

# 5.4.2 The dataset split

As mentioned in the dataset construction, we took the deliberate decision to generate a split based on topic frequency, rather than creating a random split. The aim of this out-of-distribution split is to test generalisation to topics that are more in the tail of the distribution, as well as encourage improvements in multilinguality beyond having a higher score on the specific released MultiLoKo dev set. Of course, however, because of our sourcing method, all the topics in MultiLoKo are topics on which information is available on Wikipedia. As training data, Wikipedia is often packaged as a single scrape; this may render our deliberate splitting efforts futile: the fact that a page is less visited does not make it less likely that the specific page is included in the training data. Now, we test if the dev and test split are in fact distributionally different.

![](images/b801df6ef4552fe7a4acd00f70e7b0057dc62f87938a607d60d51710da9f7fbd.jpg)
Figure 6: Average EM, dev versus test. We show the difference in score distributions between the MultiLoKo dev and test set. The results confirm that the test set is indeed out of distribution with respect to the dev set: dev scores (upper bars) are higher across the board.

In Figure 6, we show boxplots of dev and test EM scores for all models under consideration. The plot confirms that the split is indeed to be considered an OOD split: for virtually all models, the test scores are lower than the dev scores. Across all models, the average dev score is 24, whereas the average test score is 21. This suggests that our test set does indeed contain more tail knowledge than the dev set, despite the aforementioned arguments regarding Wikipedia. 
Interestingly, this implies that Wikipedia may not be the primary source from which models learn this information.

The difference in difficulty also has bearing on the other metrics: the parity score (thus: the gap between the best and worst performing language) is 37 for dev vs 34 for test, suggesting that more difficult data may to some extent hide differences between languages and therefore exemplifying the utility of considering parity along with overall performance. The mother tongue effect, on the other hand, is comparable across dev and test (1.61 vs 1.56, respectively). For the locality effect, the

effect is less interpretable. While the average difference is substantial (-0.6 dev vs -1.9 test), there is no clear pattern discernible across languages: for some, the effect reduces, whereas for others it increases.

ModelRmin Δmax Δavg Δ
Gemini 2.0 Flash0.80-10.0021.604.35
Llama 3.1 405B0.83-4.4018.805.82
GPT4-o0.85-6.0021.604.46
Llama 3.1 405B Chat0.80-10.4022.403.08
Llama 3.1 70B0.77-7.6022.004.59
Claude 3.5 Sonnet0.90-9.6020.802.84
Llama 3.1 70B Chat0.87-6.0020.003.12
Mixtral 8x22B0.91-3.2020.004.13
Qwen2.5 72B0.83-4.0016.803.47
Mixtral 8x22B-it0.92-4.8012.402.41
Qwen2.5 72B instruct0.80-0.803.200.36
+ +![](images/58b5ac41956d6eb877cd3c5fb7488a56cf7367f079130be27c34bff2cd0898d3.jpg) +(b) MT vs human translations +Figure 7: Machine versus human translations dev. (a) Per-model rank correlation between language difficulty between MT and human translations, and min, max and average difference between the two conditions. (b) Difference between EM computed on human- and machine-translated data (human score - machine score), per language. + +(a) Language difficulty stats across human- and machine translations + +# 5.4.3 Human versus machine translation + +Lastly, we consider the impact of using machine- or human-authored translations. To do so, we look at the differences in EM scores between machine and human translated data for the various languages, taking the human translations as the 'gold standard' (i.e. we consider human translated EM - machine translated EM). We show the results in Figure 7. + +In Figure 7a we show the rank correlations of the difficulties of the various languages per model, as well as the min, max and average drop from human to machine translations. We see the that, at the model level, using machine translations rather than human translations results in a systematic undervaluation of the model scores: there is not a single model for which the 'drop' from human to machine translations is negative on average. In part, this is may be a result of the previously observed lack of knowledge transfer effect. That the drop is not substantially lower for models with better transfer, however, suggests that the more impactful factor is the quality of the machine translations, that may at times result in unanswerable questions. + +In terms of model rankings, the difference between machine and human translations is minor: the model rankings between the two conditions have a rank correlation of 0.97 on the dev split, with only three local swaps (2&3 and 5&6 and 8&9) of models that did not have statistically different scores to begin with. 
This suggests that to compare models, using machine translation can be an acceptable alternative to human translations, as the mis-estimation is systematic across models.

Considering the effect across languages, we observe that even though the average drop is positive, for virtually all models there are at least some languages for which performance increases when MT is used, in some cases by even more than 10 points. For a handful of languages - specifically Russian, Swedish and Urdu - this is also true across models (see Figure 7b). While the overall rank correlation is high for language difficulty (0.88), this result still urges caution in using machine translated data for language improvement prioritisation.

# 6 Conclusion

Notwithstanding the increasing multinational deployment of LLMs in many parts of the world, adequately evaluating their multilinguality remains a challenging enterprise. Only in part is this due to the scarcity of high-quality and broad-coverage multilingual benchmarks for LLMs: perhaps a more pressing issue is that the benchmarks that are frequently used for multilingual evaluation virtually all consist of translated English data. While using completely parallel data has its advantages, using translated English data imposes an English-centric bias on the content of the benchmarks,

implying that even if the benchmark evaluates multilinguality on the surface, it does not in content. In our work, we aim to address this by presenting MultiLoKo, a multilingual benchmark spanning 31 languages that combines the best of both worlds. MultiLoKo contains 500 questions targeting locally relevant knowledge for 31 languages, separately sourced for each language with a protocol specifically designed to ensure local relevance of the question topics. It is also fully parallel, because it contains human-authored translations of the non-English partitions into English and vice versa. 
As such, it allows one to study various questions related to multilinguality, transfer and multilingual benchmark creation. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while the test set of the benchmark remains private, at least for the near future.

We use MultiLoKo to analyse 4 base and 7 chat models marketed to be multilingual. We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language, followed by Llama 3.1 405B and GPT4-o, which are close contenders in terms of average performance but both have substantially higher language gaps (39 and 49 points). Generally, scores are better when questions are asked in the language to which they are relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low per-sample consistency across question language. 
For using machine- instead of human-authored translations, as well, the effect on model ranking is limited $(\mathrm{R} = 0.97)$ , but the difficulty estimates of various languages changes with up to 12 points. Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to $34\%$ of the human-translated scores. + +While our results section is extensive already, there are still several parts of MultiLoKo that we did not explore. For instance, because of the sourcing strategy, each native question is coupled with a paragraph that contains the answer to the question. MultiLoKo could thus be transformed into a reading-comprehension benchmark, and we consider studying the difference between the knowledge and reading comprehension setup an interesting direction for future work. Furthermore, each question contains an elaborate long answer intended to explain the short answer. We have not used the long answers in any of our experiments, but foresee interesting directions including studies into CoT prompting or studying answer rationales. + +# 7 Limitations + +In this last section, we discuss various limitations of our work. + +Local relevance In our sourcing protocol, we explicitly sought to create questions locally relevant to the respective languages. It is important to notice, however, that some languages, such as English, Spanish, Portuguese, Chinese, French and to a lesser extent German and Dutch cover a wide variety of cultures. We did not separately control for that and the data for those languages thus likely comprises a mix of different locales. + +Data quality Building a bias-free evaluation datasets with few mistakes is not an easy feat. Even though we implemented several rounds of quality checks in our data collection pipeline, when looking at outputs we still incidentally found mistakes in the data or answers. 
We fixed some of these mistakes as we encountered them, but it is quite likely that more such mistakes occur in the dataset. It is also important to point out that we are less likely to spot such issues for languages that we do not understand at all, potentially creating a bias towards the set of languages for which we have a rudimentary understanding. Overall, however, we believe that the pipeline we designed assures a dataset of high quality. Of course, we welcome reports of mistakes spotted by others in the data.

Evaluation Because MultiLoKo is a generative benchmark, computing scores requires comparisons of a generated answer with a set of gold answers. A first obstacle to this method of evaluation is that it is hard to create an exhaustive list of correct short-form answers. This is especially true when the correct answer is not a number, date, title or something else that can be expressed only in a few ways. In addition to that, it is hard to incentivise LLMs to produce concise answers. Even when instructed to answer with only a number / date / name / title, they may respond with a full sentence, add a reasoning trail to their answer, or add words beyond the minimal answer in a different fashion. We addressed systematic issues of this kind in post-processing (see Appendix B), but it is hard to a priori catch all the ways that LLMs may deviate from the requested protocols. In some cases, we found additional post-processing steps that increased the scores of some models only later in the process, because scores for particular languages looked suspiciously low. For instance, we had not initially realised that our punctuation stripper did not strip punctuation in Urdu, which specifically influenced GPT4-o and Gemini. We considered several other metrics as well as judges, but eventually found that EM provided the clearest and least biased signal. 
It remains, however, a challenge to evaluate chatty LLMs completely independently from their ability to follow instructions. + +Wikipedia as information source MultiLoKo, as several other both multilingual as well as monolingual benchmarks, uses Wikipedia as main source of information. This has the advantage that Wikipedia has a large coverage across many different languages and the information is considered to be of high quality. It also facilitates comparable sourcing across languages. Of course, it also poses limitations. For one, it still provides a bias to the specific topics that can be included, that are usually primarily knowledge based. In fact, MultiLoKo is indeed a knowledge benchmark; it does not consider other types of skills. Secondly, and perhaps more importantly, Wikipedia is a corpus frequently used in the training data of LLMs. The fact that MultiLoKo is a challenging benchmark even given that (multilingual) wikipedia is likely included in the training data of most of the LLMs evaluated suggests that this is not a large issue at the moment. However, it is very possible that MultiLoKo can be 'hacked' relatively easily simply by strongly oversampling multilingual wikipedia data. + +# Acknowledgements + +While this paper knows only two authors, this benchmark would not have been possible without the support and contributions of many people. We wish to thank all of them in this last section. First, we thank Van Phung, Kriz Chan, Antonio Gai, Dunant Hin and Emily Du for their support on facilitating and streamlining interactions with vendors for the data collection process, and Milena Hoffman for her indispensable administrative support in managing the data collection process. We would furthermore like to thank Van Phung and Kriz Chan for their continued help on ensuring data quality, saliency checking output, brainstorming and general support throughout the creation of the benchmark. 
+ +We also thank the linguists that helped us for their contributions to the analysis of the pilot questions in the benchmark, which played an important role in finetuning and improving our annotation protocol as well as disregard inappropriate questions, and for helping us design prompt templates to allow language-specific querying of models in different stages for each of the languages in MultiLoKo. Specifically, we would like to thank Abdul Haque (Urdu), Aleksandra Antokhina (Russian), Ananya Banerjee (Bengali), Firman Tahar (Indonesian), Florian Mouret (French), Francisco Paredes Maldonado (Spanish), Eriko Nakamura (Japanese), Julie Lee (Korean), Khanh Tien (Vietnamese), Miao Yeh (Traditional Chinese), Renata Barboza (Portuguese), Rishabh Goel (Hindi), Sanket Suhas Satope (Marathi), Sara Martellini (Italian) and Silvia Aponte (German). We thank Kriz Chan by streamlining our collaboration with these linguists, and Maria Paez Playa for offering her teams time on this enterprise. We furthermore thank Sabrina Qiao for providing resources for quick-turnaround QA support, and Ateeq Awan (English), Kaila Conley-Coversi (Italian), Semanti Roy (Bengali) and Shahmir Shaikh (English) for delivering this QA support. + +As doing manual saliency checks is challenging for a multilingual benchmark, we also relied on the help of several colleagues to debug small issues, detect errors in questions and prompts and double check annotation judgements. We would like to thank Anna Prochowska, Daria Dudurca, Diego Perino, Etai Sella, Ivan John Piramide, Lovish Madaan, Yanir Kleiman for their help on this. + +# References + +Fakhraddin Alwajih, Gagan Bhatia, and Muhammad Abdul-Mageed. Dallah: A dialect-aware multimodal large language model for Arabic. 
In Nizar Habash, Houda Bouamor, Ramy Eskander, Nadi Tomeh, Ibrahim Abu Farha, Ahmed Abdelali, Samia Touileb, Injy Hamed, Yaser Onaizan, Bashar Alhafni, Wissam Antoun, Salam Khalifa, Hatem Haddad, Imed Zitouni, Badr AlKhamissi, Rawan Almatham, and Khalil Mrini, editors, Proceedings of The Second Arabic Natural Language Processing Conference, pages 320-336, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.arabicnlp-1.27. URL https://aclanthology.org/2024.arabicnlp-1.27/. +Anthropic. Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet, 2025. Accessed: 2025-04-11. +Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. On the cross-lingual transferability of monolingual representations. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4623–4637, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.421. URL https://aclanthology.org/2020.acl-main.421/. +Lucas Bandarkar, Davis Liang, Benjamin Muller, Mikel Artetxe, Satya Narayan Shukla, Donald Husa, Naman Goyal, Abhinandan Krishnan, Luke Zettlemoyer, and Madian Khabsa. The belebele benchmark: a parallel reading comprehension dataset in 122 language variants. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 749-775, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.44. URL https://aclanthology.org/2024.acl-long.44/. +Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. 
Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/1457c0d6bcbd4967418bf8ac142f64a-Abstract.html. +Pinzhen Chen, Simon Yu, Zhicheng Guo, and Barry Haddow. Is it good data for multilingual instruction tuning or just bad multilingual evaluation for large language models? CoRR, abs/2406.12822, 2024. URL https://doi.org/10.48550/arXiv.2406.12822. +Zhihong Chen, Shuo Yan, Juhao Liang, Feng Jiang, Xiangbo Wu, Fei Yu, Guiming Hardy Chen, Junying Chen, Hongbo Zhang, Li Jianquan, et al. Multilingualsift: Multilingual supervised instruction fine-tuning, 2023. URL https://arxiv.org/pdf/2412.15115. +Jonathan H. Clark, Eunsol Choi, Michael Collins, Dan Garrette, Tom Kwiatkowski, Vitaly Nikolaev, and Jennimaria Palomaki. TyDi QA: A benchmark for information-seeking question answering in typologically diverse languages. Transactions of the Association for Computational Linguistics, 8: 454-470, 2020. doi: 10.1162/tacl_a_00317. URL https://aclanthology.org/2020.tacl-1. 30/. +Alexis Conneau, Rudy Rinott, Guillaume Lample, Adina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. XNLI: Evaluating cross-lingual sentence representations. In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2475–2485, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1269. 
URL https://aclanthology.org/D18-1269/. + +Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Allonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. The llama 3 herd of models. CoRR, abs/2407.21783, 2024. doi: 10.48550/ARXIV.2407.21783. URL https://doi.org/10.48550/arXiv.2407.21783. 
+Alena Fenogenova, Artem Chervyakov, Nikita Martynov, Anastasia Kozlova, Maria Tikhonova, Albina Akhmetgareeva, Anton Emelyanov, Denis Shevelev, Pavel Lebedev, Leonid Sinev, Ulyana Isaeva, Katerina Kolomeytseva, Daniil Moskovskiy, Elizaveta Goncharova, Nikita Savushkin, Polina Mikhailova, Anastasia Minaeva, Denis Dimitrov, Alexander Panchenko, and Sergey Markov. MERA: A comprehensive LLM evaluation in Russian. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9920–9948, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.534. URL https://aclanthology.org/2024.acl-long.534/. +Google DeepMind. Google gemini ai update - December 2024. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024. Accessed: 2025-04-11. +Naman Goyal, Cynthia Gao, Vishrav Chaudhary, Peng-Jen Chen, Guillaume Wenzek, Da Ju, Sanjana Krishnan, Marc'Aurelio Ranzato, Francisco Guzmán, and Angela Fan. The Flores-101 evaluation benchmark for low-resource and multilingual machine translation. Transactions of the Association for Computational Linguistics, 10:522-538, 2022. doi: 10.1162/tacl_a_00474. URL https://aclanthology.org/2022.tacl-1.30/. +Momchil Hardalov, Todor Mihaylov, Dimitrina Zlatkova, Yoan Dinkov, Ivan Koychev, and Preslav Nakov. EXAMS: A multi-subject high school examinations dataset for cross-lingual and multilingual question answering. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5427-5444, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.438. URL https://aclanthology.org/2020.emnlp-main.438/. 
+Yun He, Di Jin, Chaoqi Wang, Chloe Bi, Karishma Mandyam, Hejia Zhang, Chen Zhu, Ning Li, Tengyu Xu, Hongjiang Lv, Shruti Bhosale, Chenguang Zhu, Karthik Abinav Sankararaman, Eryk Helenowski, Melanie Kambadur, Aditya Tayade, Hao Ma, Han Fang, and Sinong Wang. Multi-if: Benchmarking llms on multi-turn and multilingual instructions following. CoRR, abs/2410.15553, 2024. doi: 10.48550/ARXIV.2410.15553. URL https://doi.org/10.48550/arXiv.2410.15553. +Dieuwke Hupkes, Mario Giulianielli, Verna Dankers, Mikel Artetxe, Yanai Elazar, Tiago Pimentel, Christos Christodoulopoulos, Karim Lasri, Naomi Saphra, Arabella Sinclair, et al. A taxonomy and review of generalization research in nlp. Nature Machine Intelligence, 5(10):1161-1174, 2023. +Zhengbao Jiang, Antonios Anastasopoulos, Jun Araki, Haibo Ding, and Graham Neubig. X-FACTR: Multilingual factual knowledge retrieval from pretrained language models. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical + +Methods in Natural Language Processing (EMNLP), pages 5943-5959, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.479. URL https://aclanthology.org/2020.emnlp-main.479/. +Jaap Jumelet, Leonie Weissweiler, and Arianna Bisazza. Multiblimp 1.0: A massively multilingual benchmark of linguistic minimal pairs. CoRR, abs/2504.02768, 2025. URL https://doi.org/10.48550/arXiv.2504.02768. +Nora Kassner, Philipp Duffer, and Hinrich Schütze. Multilingual LAMA: Investigating knowledge in multilingual pretrained language models. In Paola Merlo, Jorg Tiedemann, and Reut Tsarfaty, editors, Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 3250-3258, Online, April 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.eacl-main.284. URL https://aclanthology.org/2021.eacl-main.284/. +Fajri Koto, Nurul Aisyah, Haonan Li, and Timothy Baldwin. 
Large language models only pass primary school exams in Indonesia: A comprehensive test on IndoMMLU. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12359-12374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.760. URL https://aclanthology.org/2023.emnlp-main.760/. +Patrick Lewis, Barlas Oguz, Rudy Rinnott, Sebastian Riedel, and Holger Schwenk. MLQA: Evaluating cross-lingual extractive question answering. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7315–7330, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.653. URL https://aclanthology.org/2020.acl-main.653/. +Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. CMMLU: Measuring massive multitask language understanding in Chinese. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 11260–11285, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.671. URL https://aclanthology.org/2024.findings-acl.671/. +Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, and Xian Li. Few-shot learning with multilingual generative language models. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 9019-9052, Abu Dhabi, United Arab Emirates, December 2022. 
Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.616. URL https://aclanthology.org/2022.emnlp-main.616/. +Lovish Madaan, Aaditya K Singh, Ryan Schaeffer, Andrew Poulton, Sanmi Koyejo, Pontus Stenetorp, Sharan Narang, and Dieuwke Hupkes. Quantifying variance in evaluation benchmarks. arXiv preprint arXiv:2406.10229, 2024. +Moran Mizrahi, Guy Kaplan, Dan Malkin, Rotem Dror, Dafna Shahaf, and Gabriel Stanovsky. State of what art? a call for multi-prompt LLM evaluation. Transactions of the Association for Computational Linguistics, 12:933-949, 2024. doi: 10.1162/tacl_a_00681. URL https://aclanthology.org/2024.tacl-1.52/. +Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15991-16111, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.891. URL https://aclanthology.org/2023.acl-long.891/. + +Xenia Ohmer, Elia Bruni, and Dieuwke Hupkes. Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses. In Sebastian Gehrmann, Alex Wang, João Sedoc, Elizabeth Clark, Kaustubh Dhole, Khyathi Raghavi Chandu, Enrico Santus, and Hoorman Sedghamiz, editors, Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM), pages 258-276, Singapore, December 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.gem-1.22/. +Xenia Ohmer, Elia Bruni, and Dieuwke Hupke. 
From form(s) to meaning: Probing the semantic depths of language models using multisense consistency. Computational Linguistics, 50(4):1507-1556, 12 2024. ISSN 0891-2017. doi: 10.1162/coli_a_00529. URL https://doi.org/10.1162/coli_a_00529. +OpenAI. Mmmlu dataset. https://huggingface.co/datasets/openai/MMMLU, 2025. Accessed: 2025-04-11. +OpenAI,., Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Madry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alex Kirillov, Alex Nichol, Alex Paine, Alex Renzin, Alex Tachard Passos, Alexander Kirillov, Alex Christakis, Alexis Conneau, Ali Kamali, Allan Jabri, Allison Moyer, Allison Tam, Amadou Crookes, Amin Tootoochian, Amin Tootoonchian, Ananya Kumar, Andrea Vallone, Andrej Karpathy, Andrew Braunstein, Andrew Cann, Andrew Codispoti, Andrew Galu, Andrew Kondrich, Andrew Tulloch, Andrey Mishchenko, Angela Baek, Angela Jiang, Antoine Pelisse, Antonia Woodford, Anuj Gosalia, Arka Dhar, Ashley Pantuliano, Avi Nayak, Avital Oliver, Barret Zoph, Behrooz Ghorbani, Ben Leimberger, Ben Rossen, Ben Sokolowsky, Ben Wang, Benjamin Zweig, Beth Hoover, Blake Samic, Bob McGrew, Bobby Spero, Bogo Giertler, Bowen Cheng, Brad Lightcap, Brandon Walkin, Brendan Quinn, Brian Guarraci, Brian Hsu, Bright Kellogg, Brydon Eastman, Camillo Lugaresi, Carroll Wainwright, Cary Bassin, Cary Hudson, Casey Chu, Chad Nelson, Chak Li, Chan Jun Shern, Channing Conger, Charlotte Barette, Chelsea Voss, Chen Ding, Cheng Lu, Chong Zhang, Chris Beaumont, Chris Hallacy, Chris Koch, Christian Gibson, Christina Kim, Christine Choi, Christine McLeavey, Christopher Hesse, Claudia Fischer, Clemens Winter, Coley Czarnecki, Colin Jarvis, Colin Wei, Constantin Koumouzelis, Dane Sherburn, Daniel Kappler, Daniel Levin, Daniel Levy, David Carr, David Farhi, David Mely, David Robinson, David Sasaki, Kenny Jin, Dev Valladares, Dimitris Tsipras, Doug Li, 
Duc Phong Nguyen, Duncan Findlay Edede Oiwoh, Edmund Wong Ehsan Asdar Elizabeth Proehl Elizabeth Yang Eric Antonow Eric Kramer Eric Peterson Eric Sigler Eric Wallace Eugene Brevdo Evan Mays Farzad Khorasani Felipe Petroski Such Filippo Raso Francis Zhang Fred von Lohmann Freddie Sult Gabriel Goh Gene Oden Geoff Salmon Giulio Starace Greg Brockman Hadi Salman Haiming Bao Haitang Hu Hannah Wong Haoyu Wang Heather Schmidt Heather Whitney Heewoo Jun Hendrik Kirchner Henrique Ponde de Oliveira Pinto Hongyu Ren Huiwen Chang Hyung Won Chung Ian Kivlichan Ian O'Connell Ian O'Connell Ian Osband Ian Silber Ian Sohl Ibrahim Okuyucu Ikai Lan Ilya Kostrikov Ilya Sutskever Ingmar Kanitscheider Ishaan Gulrajani Jacob Coxon Jacob Menick Jakub Pachocki James Aung James Betker James Crooks James Lennon Jamie Kiros Jan Leike Jane Park Jason Kwon Jason Phang Jason Teplitz Jason Wei Jason Wolfe Jay Chen Jeff Harris Jenia Varavva Jessica Gan Lee Jessica Shieh Ji Lin Jiahui Yu Jiayi Weng Jie Tang Jieqi Yu Joanne Jang Joaquin Quinonero Candela Joe Beutler Joe Landers Joel Parish Johannes Heidecke John Schulman Jonathan Lachman Jonathan McKay Jonathan Uesato Jonathan Ward Jong Wook Kim Joost Huizinga Jordan Sitkin Jos Kraaijeveld Josh Gross Josh Kaplan Josh Snyder Joshua Achiam Joy Jiao Joyce Lee Juntang Zhuang Justyn Harriman Kai Fricke Kai Hayashi Karan Singhal Katy Shi Kevin Karthik Kayla Wood Kendra Rimbach Kenny Hsu Kenny Nguyen Keren Gu-Lemberg Kevin Button Kevin Liu Kiel Howe Krithika Muthukumar Kyle Luther Lama Ahmad Larry Kai Lauren Itow Lauren Workman Leher Pathak Leo Chen Li Jing Lia Guy Liam Fedus Liang Zhou Lien Mamitsuka Lilian Weng Lindsay McCallum Lindsey Held Long Ouyang Louis Feuvrier Lu Zhang Lukas Kondraciuk Lukasz Kaiser Luke Hewitt Luke Metz Lyric Doshi Mada Aflak Maddie Simens Madelaine Boyd Madeleine Thompson Marat Dukhan Mark Chen Mark Gray Mark Hudnall Marvin Zhang Marwan Aljubeh Mateusz Litwin Matthew Zeng Max Johnson Maya Shetty Mayank Gupta Meghan Shah Mehmet 
Yatbaz Meng Jia Yang Mengchao Zhong Mia Glaese Mianna Chen Michael Janner Michael Lampe Michael Petrov Michael Wu Michele Wang Michelle Fradin Michelle Pokrass Miguel Castro Miguel Oom Temudo de Castro Mikhail Pavlov Miles + +Brundage, Miles Wang, Minal Khan, Mira Murati, Mo Bavarian, Molly Lin, Murat Yesildal, Nacho Soto, Natalia Gimelshein, Natalie Cone, Natalie Staudacher, Natalie Summers, Natan LaFontaine, Neil Chowdhury, Nick Ryder, Nick Stathas, Nick Turley, Nik Tezak, Nik Felix, Nithanth Kudige, Nitish Keskar, Noah Deutsch, Noel Bundick, Nora Puckett, Ofir Nachum, Ola Okelola, Oleg Boiko, Oleg Murk, Oliver Jaffe, Olivia Watkins, Olivier Godement, Owen Campbell-Moore, Patrick Chao, Paul McMillan, Pavel Belov, Peng Su, Peter Bak, Peter Bakkum, Peter Deng, Peter Dolan, Peter Hoeschele, Peter Welinder, Phil Tillet, Philip Pronin, Philippe Tillet, Prafulla Dhariwal, Qiming Yuan, Rachel Dias, Rachel Lim, Rahul Arora, Rajan Troll, Randall Lin, Rapha Gontijo Lopes, Raul Puri, Reah Miyara, Reimar Leike, Renaud Gaubert, Reza Zamani, Ricky Wang, Rob Donnelly, Rob Honsby, Rocky Smith, Rohan Sahai, Rohit Ramchandani, Romain Huet, Rory Carmichael, Rowan Zellers, Roy Chen, Ruby Chen, Ruslan Nigmatullin, Ryan Cheu, Saachi Jain, Sam Altman, Sam Schoenholz, Sam Toizer, Samuel Miserendino, Sandhini Agarwal, Sara Culver, Scott Ethersmith, Scott Gray, Sean Grove, Sean Metzger, Shamez Hermani, Shantanu Jain, Shengjia Zhao, Sherwin Wu, Shino Jomoto, Shirong Wu, Shuaiqi, Xia, Sonia Phene, Spencer Papay, Srinivas Narayanan, Steve Coffey, Steve Lee, Stewart Hall, Suchir Balaji Tal Broda Tal Stramer, Tao Xu, Tarun Gogineni, Taya Christianson, Ted Sanders, Tejal Patwardhan, Thomas Cunninghamman, Thomas Degry, Thomas Dimson, Thomas Raoux, Thomas Shadwell, Tianhao Zheng Todd Underwood,Todor Markov,Toki Sherbakov,Tom Rubin Tom Stasi Tomer Kaftan. 
Tristan Heywood, Troy Peterson, Tyce Walters, Tyna Eloundou, Valerie Qi, Veit Moeller, Vinnie Monaco, Vishal Kuo, Vlad Fomenko, Wayne Chang, Weiyi Zheng, Wenda Zhou, Wesam Manassra, Will Sheu, Wojciech Zaremba, Yash Patil, Yilei Qian, Yongjik Kim, Youlong Cheng, Yu Zhang, Yuchen He, Yuchen Zhang, Yujia Jin, Yunxing Dai, and Yury Malkov. GPT-4o system card, 2024. URL https://arxiv.org/abs/2410.21276.
+Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Pierre Isabelle, Eugene Charniak, and Dekang Lin, editors, Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA, July 2002. Association for Computational Linguistics. doi: 10.3115/1073083.1073135. URL https://aclanthology.org/P02-1040/.
+Edoardo Maria Ponti, Goran Glavaš, Olga Majewska, Qianchu Liu, Ivan Vulić, and Anna Korhonen. XCOPA: A multilingual dataset for causal commonsense reasoning. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2362-2376, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.185. URL https://aclanthology.org/2020.emnlp-main.185/.
+Maja Popovic. chrF: character n-gram F-score for automatic MT evaluation. In Ondrej Bojar, Rajen Chatterjee, Christian Federmann, Barry Haddow, Chris Hokamp, Matthias Huck, Varvara Logacheva, and Pavel Pecina, editors, Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395, Lisbon, Portugal, September 2015. Association for Computational Linguistics. doi: 10.18653/v1/W15-3049. URL https://aclanthology.org/W15-3049/.
+Jirui Qi, Raquel Fernández, and Arianna Bisazza. Cross-lingual consistency of factual knowledge in multilingual language models.
In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10650-10666, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.658. URL https://aclanthology.org/2023.emnlp-main.658/. +Qwen,.; An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115. +Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Jian Su, Kevin Duh, and Xavier Carreras, editors, Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392, Austin, Texas, November 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264/. + +Angelika Romanou, Negar Foroutan, Anna Sotnikova, Zeming Chen, Sree Harsha Nelaturu, Shivalika Singh, Rishabh Maheshwary, Micol Altomare, Mohamed A. Haggag, Snegha A, Alfonso Amayuelas, Azril Hafizi Amirudin, Viraat Aryabumi, Danylo Boiko, Michael Chang, Jenny Chim, Gal Cohen, Aditya Kumar Dalmia, Abraham Diress, Sharad Duwal, Daniil Dzenhaliou, Daniel Fernando Erazo Florez, Fabian Farestam, Joseph Marvin Imperial, Shayekh Bin Islam, Perttu Isotalo, Maral Jabbarishiviari, Borje F. 
Karlsson, Eldar Khalilov, Christopher Klamm, Fajri Koto, Dominik Krzeminski, Gabriel Adriano de Melo, Syrielle Montariol, Yiyang Nan, Joel Niklaus, Jekaterina Novikova, Johan Samir Obando Ceron, Debjit Paul, Esther Ploeger, Jebish Purbey, Swati Rajwal, Selvan Sunitha Ravi, Sara Rydell, Roshan Santhosh, Drishti Sharma, Marjana Prifti Skenduli, Arshia Soltani Moakhar, Bardia Soltani Moakhar, Ran Tamir, Ayush Kumar Tarun, Azmine Toushik Wasi, Thenuka Ovin Weerasinghe, Serhan Yilmaz, Mike Zhang, Imanol Schlag, Marzieh Fadaee, Sara Hooker, and Antoine Bosselut. INCLUDE: evaluating multilingual language understanding with regional knowledge, 2024. URL https://doi.org/10.48550/arXiv.2411.19799. +Eduardo Sánchez, Belen Alastruey, Christophe Ropers, Pontus Stenetorp, Mikel Artetxe, and Marta R. Costa-jussà. Linguini: A benchmark for language-agnostic linguistic reasoning. CoRR, abs/2409.12126, 2024. doi: 10.48550/ARXIV.2409.12126. URL https://doi.org/10.48550/arXiv.2409.12126. +Priyanka Sen, Alham Fikri Aji, and Amir Saffari. Mintaka: A complex, natural, and multilingual dataset for end-to-end question answering. In Nicoletta Calzolari, Chu-Ren Huang, Hansaem Kim, James Pustejovsky, Leo Wanner, Key-Sun Choi, Pum-Mo Ryu, Hsin-Hsi Chen, Lucia Donatelli, Heng Ji, Sadao Kurohashi, Patrizia Paggio, Nianwen Xue, Seokhwan Kim, Younggyun Hahm, Zhong He, Tony Kyungil Lee, Enrico Santus, Francis Bond, and Seung-Hoon Na, editors, Proceedings of the 29th International Conference on Computational Linguistics, pages 1604-1619, Gyeongju, Republic of Korea, October 2022. International Committee on Computational Linguistics. URL https://aclanthology.org/2022.coling-1.138/. +Sheikh Shafayat, H Hasan, Minhajur Mahim, Rifki Putri, James Thorne, and Alice Oh. BEnQA: A question answering benchmark for Bengali and English. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1158-1177, Bangkok, Thailand, August 2024. 
Association for Computational Linguistics. doi: 10. 18653/v1/2024-findings-acl.68. URL https://aclanthology.org/2024-findings-acl.68/. +Freda Shi, Mirac Suzgun, Markus Freitag, Xuezhi Wang, Suraj Srivats, Soroush Vosoughi, Hyung Won Chung, Yi Tay, Sebastian Ruder, Denny Zhou, Dipanjan Das, and Jason Wei. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp. +Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David Ifeoluwa Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, Enzo Ferrante, Marzieh Fadaee, Beyza Ermis, and Sara Hooker. Global MMLU: understanding and addressing cultural and linguistic biases in multilingual evaluation. CoRR, abs/2412.03304, 2024. doi: 10.48550/ARXIV.2412.03304. URL https://doi.org/10.48550/arXiv.2412.03304. +Mistral AI team. Cheaper, better, faster, stronger, 2024. URL https://mistral.ai/news/mixtral-8x22b. Accessed: 4-Apr-2025. +Aman Singh Thakur, Kartik Choudhary, Venkat Srinik Ramayapally, Sankaran Vaidyanathan, and Dieuwke Hupkes. Judging the judges: Evaluating alignment and vulnerabilities in Ilms-as-judges. CoRR, abs/2406.12624, 2024. URL https://doi.org/10.48550/arXiv.2406.12624. +Lucas Weber, Elia Bruni, and Dieuwke Hupkes. Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning. In Jing Jiang, David Reitter, and Shumin Deng, editors, Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pages 294-313, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.conll-1.20. URL https://aclanthology.org/2023.conll-1.20/. 
+ +Weihao Xuan, Rui Yang, Heli Qi, Qingcheng Zeng, Yunze Xiao, Yun Xing, Junjue Wang, Huitao Li, Xin Li, Kunyu Yu, Nan Liu, Qingyu Chen, Douglas Teodoro, Edison Marrese-Taylor, Shijian Lu, Yusuke Iwasawa, Yutaka Matsuo, and Irene Li. Mmlu-prox: A multilingual benchmark for advanced large language model evaluation. CoRR, abs/2503.10497, 2025. URL https://doi.org/10.48550/arXiv.2503.10497. +Wenxuan Zhang, Mahani Aljunied, Chang Gao, Yew Ken Chia, and Lidong Bing. M3exam: A multilingual, multimodal, multilevel benchmark for examining large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine, editors, Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/117c5c8622b0d539f74f6d1fb082a2e9-Abstract-Datasets_and_Benchmarks.html. +Yuan Zhang, Jason Baldridge, and Luheng He. PAWS: Paraphrase adversaries from word scrambling. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1298-1308, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1131. URL https://aclanthology.org/N19-1131/. + +# A Additional dataset statistics + +For reference, we provide a few dataset statistics beyond the main results in the paper. + +![](images/0ee44851e6330b28bb51edddb359d3b46ffd35f998c59f2c4c70256478702034.jpg) +Figure 8: Distribution of output types on the dev split. We show the normalised distribution of correct output types across languages, ordered (from bottom to top) by average frequency. Rare output types that occur only a few times are mapped to the category 'other'. 
+ +Output type distribution In Figure 8, we show the per-language distribution of output types for MultiLoKo dev split.10 We mapped very rare output types, such as 'a quantity', 'a period of time' or 'letter' to 'other', for plotting purposes. We can see that name is the most common output type across languages, followed by the generic output type a word and number. Also place and date are relatively common output types, whereas most other output types occur very infrequently or only for a handful of languages. + +![](images/bd414948b1fc64eac369d890930e1fd6fdc455be1d18c2df3bdd0cd54887fd29.jpg) +Figure 9: Average question and answer lengths. We show the per-question average length (in words) of the locally-sourced questions and answers, human-translated into English. + +Input and output length In addition to that, we show the average question – and output lengths of human-translated the locally sourced questions to English in Figure 9. While there is some variation in particular in question length, the lengths of the answers are relatively consistent. The average answer length is around 2, combining one-word answers with (usually) longer names. + +# B Instruction following + +To facilitate evaluation, we instruct models to answer question with only a number/place/etc. Overall, we found that base models (with a five-shot template) are much better at abiding by this instruction than chat models, which exhibit a number of pathologies. While some of those can be caught with appropriate post-processing (see Appendix C, this is not the case for all issues. Below, we provide a summary of the main instruction-following issues we encountered with chat models. + +False refusals Sometimes chat models refuse to provide an answer when the question is falsely perceived to be inappropriate (e.g. when the question asks about someone aged younger than 18). 
+ 

Producing full sentences Another issue we observed is that chat models would provide a full sentence answer, rather than a single word or phrase (e.g. Which year was Francisco Franco born? Produce a year only. - Francisco Franco was born in 1936). Such full-sentence answers make exact match rating impossible. The effect is not consistent across languages and happens only for some of the examples, without any discernible pattern, and is therefore difficult to address completely with post-processing.[11]

Spurious addition of "answer is" Likely due to overtraining on MMLU style tasks, models such as OpenAI's GPT-4 and Gemini 2.0 preface the vast majority of the answers in English with "answer is" or "X answer is X" where X is the desired correct response. This is remarkable, because it is essentially a repetition of the end of the prompt. However, it is easy to fix in post-processing.

Japanese specific issues In Japanese, in general it is not polite to answer with incomplete sentences. As such chat models often append the copula verb "desu" to the answer, making exact match unsuccessful. We are able to fix this in post-processing.

Claude 3.5 Sonnet issues We were unable to make Claude 3.5 Sonnet follow the instructions to produce just an answer in English. It seemed to engage in a long chain-of-thought reasoning style response which we were unable to reliably parse. This issue only manifests in English and only with Claude. For this reason, we exclude Claude 3.5 Sonnet from our knowledge transfer results, as it would make the average lack of knowledge transfer from non-English languages to English more severe than it is.

# C Post-processing details

We perform the following post-processing for both the reference answers and the answers produced by the model:

- Remove leading and trailing whitespaces.
- Remove punctuation.
- Lowercase everything.
+ 

We perform the following additional post-processing for pretrained models:

- Remove leading "Answer:" or "A:" or the non-English equivalent from the output.
- Remove everything after the first newline.

We perform the following additional post-processing for post-trained models:

- Remove leading "answer is:"
- Detect the pattern "X answer is X", where X is the desired answer, and strip the unnecessary part in the middle.
- Remove trailing "desu" in Japanese.

# D Annotation instructions

Our annotation pipeline contains five stages: 1) locality rating, 2) question generation, 3) question review, 4) question answering, and 5) translation. Below, we provide the annotation instructions for each of these stages.

# D.1 Locality rating

To narrow down the initial selection of paragraphs – sampled from the top-rated Wikipedia pages of the respective locales – the first step in our annotation pipeline is locality rating. Given a paragraph, we ask annotators to rate whether the paragraph is locally relevant to the particular locale, on a Likert scale from 1 to 5, where 1 refers to extremely local and relatively obscure topics very specifically related to the specific language or locale and with little international recognition and 5 to globally well-known topics. We also ask annotators to disregard pages about inappropriate or politically sensitive topics. The rubric for locality annotation can be found in Table 3. We disregard everything with a locality rating of 3 or lower.

DescriptionExample
1.Extremely local and relatively obscure. Content that is of interest only to a small, localized group, such as a specific town, region, or community. These topics are typically obscure and not widely known beyond their immediate area.Local radio stations, small town historical events, regional businesses, or niche local cultural practices.
2.Regional interest. Topics that have some relevance beyond a specific locality but are still primarily of interest within a particular region or country.State or provincial politicians, regional cuisine, local sports teams, or medium-sized companies with regional influence.
3.National Significance. Content that is widely recognized within a single country, but relatively unknown internationally.National politicians (not internationally known), popular national media figures, major corporations within a country, or significant national historical events.
4.International recognition. Topics that are recognized and have relevance in multiple countries but may not be universally known across the globe. These topics often have international influence and are likely to be covered in international media, though their impact may vary by region.International brands which may be recognized in more than one country, celebrities with some international reach, significant cultural movements, or political conflicts with some awareness on the international stage.
5.Global prominence. Content that is widely recognized and relevant across a large number of countries around the world. These topics have a global impact or appeal and are likely to be well-represented in media across diverse cultures and regions.Globally famous celebrities (e.g., Cristiano Ronaldo), multinational corporations (e.g., Apple), major world events, or universally recognized cultural icons.
+ +Table 3: Rubric for locality rating task. In the locality rating task, we ask the annotators to rate paragraphs with respect to how locally relevant the topic is to the locale. + +# D.2 Question generation + +The second and main annotation step in our pipeline is the step in which we ask annotators to generate questions about sampled paragraphs. We ask annotators to generate a challenging question with a short answer. The answer should be easy to evaluate with string-matching metrics, the questions should not be open-ended or have many possible correct answers, be ambiguous or subjective, and the expected short answer should be concise. To ensure difficulty, we ask that answering the question + +requires combining information from different parts in the accompanying text; it should not be answerable by mere regurgitation of a single sentence. We furthermore ask that the question is formulated such that its answer will not change over time (e.g. not 'How many medals has Sifan Hassan won', but 'How many medals has Sifan Hassan won between 2018 and 2022 (inclusive)'), and that the question is answerable also without the article (e.g. not 'How many tv shows did the person in this article produce?'). To facilitate validation checks in the next round, we also ask that the question authors write a longer answer to explain how they arrived at the short answer. We also ask the question authors to annotate what is the type of the correct answer (e.g. number, name, date, etc.). In the pilot, we observed that – for some languages – the vast majority of questions were questions that required some form of numerical reasoning. Because the intention of the benchmark is to address knowledge more than reasoning, we afterwards restricted the number of numerical questions to $10\%$ . Similarly, we asked question authors to avoid yes/no questions. 
+ +# D.3 Question review + +In the first round of question review, we asked annotators from a different provider to judge whether the questions abide by the rules provided to the question authors. All question reviewers are native speakers. Specifically, we ask them to check if: + +- The question pertains to a locally relevant topic +- The question is clear and understandable, and not subjective +- The question has a clear and concise answer +- If there are multiple possible variations of the answer possible (e.g. 'Dick Schoof' / 'Minister Dick Schoof' / 'Prime Minister Dick Schoof' / etc), all versions of the answer are provided. +- The question and answer are in the correct language +- The question is understandable without the article +- That the answer to the question will not likely change in the near future + +When a question can be fixed with a minor change (e.g. add a time indication to make sure an answer will not change in the near future, or add an extra answer version), we ask the question reviewers to implement this fix and describe it. In the pilot round, we use the annotator feedback to finetune our annotation protocol and provide feedback to the question-authors. During the rest of the data collection, we simply disregard questions that are not useable as is or can be corrected with minor changes. + +# D.4 Validation through question answering + +In the last stage of our question generation pipeline, we have additional annotators answer the sourced and reviewed question. The goal of this validation task is to confirm that the questions are answerable, correct, non-ambiguous when read by individuals other than the original question author, and that all possible versions of the answers are included. For each question, we ask two additional annotators to first answer the question, using the snippets the questions were sourced from for context. 
After they have answered the question, they are shown the list of reference answers written by the original author of the question as well as the rationale they provided, and we ask them to reflect upon the answer they gave themselves. If their answer did not match any answer in the original reference list, we ask them to either add their answer to the list if it is semantically equivalent to their own answer or indicate which answer they believe to be correct, their own or the original answer. We disregard all questions where at least one annotator disagrees with the original question author. + +# E Related work + +In this paper, we introduce a new multilingual benchmark for LLMs that we believe addresses gaps and pitfalls in existing benchmarks. We (concisely) outlined those gaps and pitfalls and mentioned several other works related to ours in the introduction of this paper. Here, we discuss multilingual evaluation of LLMs in more detail. Specifically, we discuss what datasets recent LLM releases have used for multilingual evaluation (Appendix E.1) and what other datasets and approaches they could have used but did not (Appendix E.2). + +
Claude 3.5 SonnetMGSM (Shi et al., 2023)
Gemini 2.0 FlashMentions multilingual audio, no multilingual benchmarks scores reported.
GPT4-oARC-Easy and TruthfulQA translated into five African languages (internal benchmark), Uhura-Eval (internal benchmark).
Llama 3.1MGSM (Shi et al., 2023), Multilingual MMLU (internal benchmark)
Mixtral 8x22Btranslated ARC-C, HellaSwag and MMLU (internal benchmarks)
Qwen2.5 72BM3Exam (Zhang et al., 2023), IndoMMLU (Koto et al., 2023), ruMMLU (Fenogenova et al., 2024), translated MMLU (Chen et al., 2023), Belebele (Bandarkar et al., 2024), XCOPA (Ponti et al., 2020), XWinograd (Muennighoff et al., 2023), XStoryCloze (Lin et al., 2022), PAWS-X (Zhang et al., 2019), MGSM (Shi et al., 2023), Flores-101 (Goyal et al., 2022)
+ +Table 4: Multilingual evaluation of recent LLM releases, overview. We provide an overview table of the benchmarks for which scores are reported in the release papers or notes of the LLMs we evaluated in this paper. Models are sorted alphabetically. + +# E.1 Multilingual evaluation of LLMs in practice + +While multilinguality is something frequently mentioned in the release papers or posts of recent LLM releases, the datasets for which they actually report scores are in most cases quite limited. Of the models that we evaluated for this paper, Gemini 2.0 Flash reported no multilingual scores at all; GPT4-o and Mixtral 8x22B report scores only on internally translated but not publicly available English benchmarks; Claude 3.5 Sonnet reports scores for only one benchmark - MGSM. MGSM is also the only publicly available benchmark for which Llama 3.1 reports scores, along with - also - an internally translated version of MMLU that is not publicly available. The only model that extensively reports multilingual benchmark values, on more than 10 benchmarks, is Qwen2.5 72B. We provide an overview of the multilingual benchmarks for which scores are reported for these models in Table 4. + +# E.2 Multilingual evaluation options for LLMs + +While, as we discuss below, there are gaps and challenges with multilingual evaluation for LLMs, there are in fact many more options than is suggested by what is reported in recent releases. Below, we discuss other options for multilingual LLM evaluation. + +Translated English benchmarks As mentioned earlier on, benchmarks used for LLM evaluation are often translated English benchmarks. In some cases, the benchmarks were designed to evaluate only English and translated later, such as translated MMLU (e.g. Li et al., 2024; Chen et al., 2023; OpenAI, 2025; Singh et al., 2024) or MMLU-ProX (Xuan et al., 2025), MGSM (Shi et al., 2023) or MLAMA (Kassner et al., 2021). 
In other cases, the benchmark was multilingual at the time of its creation, but means of creation of the non-English data was through translating English sourced data, such as Belebele Bandarkar et al. (2024), Mintaka (Sen et al., 2022), or X-FACTR (Jiang et al., 2020). Taken together, translated benchmarks span quite a range of tasks, such as question answering (Artetxe et al., 2020; Lewis et al., 2020; Qi et al., 2023; Ohmer et al., 2023), natural language inference (Conneau et al., 2018), paraphrase detection (Zhang et al., 2019), general linguistic competence (Jumelet et al., 2025), reading comprehension (Artetxe et al., 2020; Bandarkar et al., 2024) and commonsense reasoning (Ponti et al., 2020), and even instruction following (He et al., 2024). With the exception of question answering and of course instruction following, however, many of these tasks have gone (somewhat) out of fashion for LLM evaluation, a trend which is mirrored also in the usage of their multilingual counterparts. As mentioned before, translated benchmarks have the advantage of containing parallel data, allowing for some form of comparability across languages, but are English-centric in content and may suffer from translationese (see e.g. Romanou et al., 2024; Chen et al., 2024, for a recent discussion of this). + +Multilingual benchmarks sourced from scratch Though much rarer, there are also benchmarks that are created independently for each language they include. Clark et al. (2020) release a question answering dataset separately sourced for 11 different languages, with a protocol relatively similar + +to ours. In a different category, Hardalov et al. (2020), Zhang et al. (2023) and Romanou et al. (2024) and Sánchez et al. (2024) do not create benchmark data, but instead collect existing exam or competition questions from official human exams. In case of Zhang et al. (2023), the exams are graduation exams of primary, middle and high school; Hardalov et al. 
(2020) includes official state exams taken by graduating high school students, which may contain parallel pairs in case countries allow examinations to be taken in multiple languages; Romanou et al. (2024) cover academic exams at middle and high school and university level, professional certifications and licenses, and exams to obtain regional licenses. Sánchez et al. (2024) instead focus on questions from the International Linguistic Olympiad corpus. Lastly, as part of their study Ohmer et al. (2023) create a dataset called SIMPLE FACTS, containing factual questions created through a shared template filled in with language-specific factual data. + +Consistency evaluation A rather different approach to assess multilinguality in LLMs is to focus not on accuracy across different languages, but to consider whether predictions are consistent across languages. This tests knowledge and skill transfer between languages more explicitly. Two recent examples of studies incorporating consistency-based evaluations on factual knowledge questions are Qi et al. (2023) and Ohmer et al. (2023). Qi et al. (2023) focuses specifically on sample-level consistency of answers across different languages, requiring existing parallel benchmarks. Ohmer et al. (2023), instead, ask models to translate benchmark questions themselves before answering them again. This can, with some caveats, be applied to any existing monolingual benchmark, but – requiring multiple steps – it is more involved as a paradigm, and is somewhat bottlenecked by the translation ability of the model to be evaluated. + +Translation as a proxy for multilinguality Another, more implicit method to assess multilinguality in LLMs is to evaluate their ability to translate from one language to another. This approach was famously used by Brown et al. (2020), but has not been common since. 
+ +Monolingual non-English evaluation In our discussion, we have focussed on multilingual evaluation options that cover multiple other languages. After all, a benchmark to evaluate models on Bengali (e.g. Shafayat et al., 2024) or Arabic (e.g. Alwajih et al., 2024) can contribute to multilingual evaluation when combined with other benchmarks, but does not so on its own. Because such benchmarks are usually created by language experts for the respective languages, they usually target locally relevant skills and knowledge and are likely of higher quality than benchmarks created for many languages simultaneously (either through translation or from scratch). Yet, composing a suite including many languages that allows direct comparisons between languages remains challenging. We believe such benchmarks can be important for multilingual evaluation in LLMs, but will not further discuss benchmarks focussing on individual languages or very small sets of languages within one family here. \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10356/images/02b5749be8a0b0cad9681d40427e51c88c0bc0e81cda587783d142cb140bf240.jpg b/data/2025/2504_10xxx/2504.10356/images/02b5749be8a0b0cad9681d40427e51c88c0bc0e81cda587783d142cb140bf240.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64b86ded115538dc988afc46a7fb7210a9cb0a69 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/02b5749be8a0b0cad9681d40427e51c88c0bc0e81cda587783d142cb140bf240.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee0de4a6ca9eba3dd096870f9635617cf0668d1cf5a3542eb7381b5ce186596a +size 17028 diff --git a/data/2025/2504_10xxx/2504.10356/images/07d531440daa7d359f135908c66b8f86aff2111a8ae67e0aa29fd27c7f2f0eeb.jpg b/data/2025/2504_10xxx/2504.10356/images/07d531440daa7d359f135908c66b8f86aff2111a8ae67e0aa29fd27c7f2f0eeb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a96c6de64dde78704850c44ed903a12f58232606 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10356/images/07d531440daa7d359f135908c66b8f86aff2111a8ae67e0aa29fd27c7f2f0eeb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfbe00e56140143d9df2fa0896c9d0f5ac9ff0000234aa7a7752d7b84dadaad2 +size 25184 diff --git a/data/2025/2504_10xxx/2504.10356/images/0ee44851e6330b28bb51edddb359d3b46ffd35f998c59f2c4c70256478702034.jpg b/data/2025/2504_10xxx/2504.10356/images/0ee44851e6330b28bb51edddb359d3b46ffd35f998c59f2c4c70256478702034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0d374cb19265603723834ad02de935d5898b9cb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/0ee44851e6330b28bb51edddb359d3b46ffd35f998c59f2c4c70256478702034.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d15e9ad57e97ea67daf22f7016475c98892493131f81238898c2879c03a9a76e +size 68868 diff --git a/data/2025/2504_10xxx/2504.10356/images/187a2566712d7e7b5269313bad6b1bd1cade176fdb265f30224cbd55ff0b8bdc.jpg b/data/2025/2504_10xxx/2504.10356/images/187a2566712d7e7b5269313bad6b1bd1cade176fdb265f30224cbd55ff0b8bdc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2aa7d45bb6094ebf8160973bcfc8f2ee1909012 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/187a2566712d7e7b5269313bad6b1bd1cade176fdb265f30224cbd55ff0b8bdc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf63c54de6950fac9e3364f897ff7c2aca562a9fe7aaa019a8ae3620667d7ed3 +size 93610 diff --git a/data/2025/2504_10xxx/2504.10356/images/4292a85211fc2019ae5f9eb0d70959896e6e3c18041a561b5072bafe1218ace8.jpg b/data/2025/2504_10xxx/2504.10356/images/4292a85211fc2019ae5f9eb0d70959896e6e3c18041a561b5072bafe1218ace8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..254d038c6d1d72e9c39190f7c71b217e870f2bf6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/4292a85211fc2019ae5f9eb0d70959896e6e3c18041a561b5072bafe1218ace8.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e55363b2a925455debdc7d447bbac620058df4c81905cc31f77e5363773f1555 +size 43734 diff --git a/data/2025/2504_10xxx/2504.10356/images/58b5ac41956d6eb877cd3c5fb7488a56cf7367f079130be27c34bff2cd0898d3.jpg b/data/2025/2504_10xxx/2504.10356/images/58b5ac41956d6eb877cd3c5fb7488a56cf7367f079130be27c34bff2cd0898d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21ea2fbdaafeea63a5c57a9a996dd2d91a4970b7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/58b5ac41956d6eb877cd3c5fb7488a56cf7367f079130be27c34bff2cd0898d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0931adb259fb00740495d7914fb69c94507d7d2f6ef76cf4deb7d4178feebbe +size 27876 diff --git a/data/2025/2504_10xxx/2504.10356/images/5a773333e4ab35574de8f34ba1e9ec4e4d7fb66080d19020a6de0b49c9e62b74.jpg b/data/2025/2504_10xxx/2504.10356/images/5a773333e4ab35574de8f34ba1e9ec4e4d7fb66080d19020a6de0b49c9e62b74.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f735c8da46ed41fc8a2bd3a3e1944e132e8a86df --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/5a773333e4ab35574de8f34ba1e9ec4e4d7fb66080d19020a6de0b49c9e62b74.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5fc400a1b247916ebd0ab2fb650dea4c6ce7d88ab8a2c1821b4611dd38ae7c2 +size 97009 diff --git a/data/2025/2504_10xxx/2504.10356/images/5d86d006a27cdf31406eb72a1b910b6a9d13150c13b3ee9064c1db294d2d41f2.jpg b/data/2025/2504_10xxx/2504.10356/images/5d86d006a27cdf31406eb72a1b910b6a9d13150c13b3ee9064c1db294d2d41f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfbf8751ce059c649501ecd68992d5f75faed90f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/5d86d006a27cdf31406eb72a1b910b6a9d13150c13b3ee9064c1db294d2d41f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e92949dc4c58037c8be7aafff70cb4db132c738538b2936b366b2a5f898d6ac8 +size 20417 diff --git 
a/data/2025/2504_10xxx/2504.10356/images/5e9488ee42bcffe9751990decbf76ca92024b4aaa74c5ac9b05c6f80f75e0642.jpg b/data/2025/2504_10xxx/2504.10356/images/5e9488ee42bcffe9751990decbf76ca92024b4aaa74c5ac9b05c6f80f75e0642.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff16c1d3dcf918bbbe087a9aeb45d66bd1362f68 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/5e9488ee42bcffe9751990decbf76ca92024b4aaa74c5ac9b05c6f80f75e0642.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1de452b73f377b255d6fc3e880519b324579c39b4227603da5ea8974f27fe79b +size 38108 diff --git a/data/2025/2504_10xxx/2504.10356/images/6097bbba21f178dc4be15a6d96ad58e0b1c3c7cc17e299a76a31147c2d105aca.jpg b/data/2025/2504_10xxx/2504.10356/images/6097bbba21f178dc4be15a6d96ad58e0b1c3c7cc17e299a76a31147c2d105aca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9eb842173a09d145598da75a9650f84ad81f385 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/6097bbba21f178dc4be15a6d96ad58e0b1c3c7cc17e299a76a31147c2d105aca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07661fe59807570b4cb70b79dcba9faaa448c861baf4e73fb033571d3533763b +size 108233 diff --git a/data/2025/2504_10xxx/2504.10356/images/8d54153ccb122aaea2874537a950dad799895f33acc86ca682873fd95442f949.jpg b/data/2025/2504_10xxx/2504.10356/images/8d54153ccb122aaea2874537a950dad799895f33acc86ca682873fd95442f949.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e75d1fdd818a5da2fcc42c9e09d821be1691e970 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/8d54153ccb122aaea2874537a950dad799895f33acc86ca682873fd95442f949.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d03352abd589d3b99f421214de0e49a30c3ea93baec4127b99d5db923bbf8034 +size 136700 diff --git a/data/2025/2504_10xxx/2504.10356/images/b0918ad445a1cd7b34ff620e97c14ae4d7e0eab4778d0e71993ce777ae50542b.jpg 
b/data/2025/2504_10xxx/2504.10356/images/b0918ad445a1cd7b34ff620e97c14ae4d7e0eab4778d0e71993ce777ae50542b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab8dd528c6598791467cef8939fd1d36731f3f81 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/b0918ad445a1cd7b34ff620e97c14ae4d7e0eab4778d0e71993ce777ae50542b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f147aeb00912166bbafd228fb039bd90fc3cc12ac141279f14cd1caa2a1551f +size 29465 diff --git a/data/2025/2504_10xxx/2504.10356/images/b801df6ef4552fe7a4acd00f70e7b0057dc62f87938a607d60d51710da9f7fbd.jpg b/data/2025/2504_10xxx/2504.10356/images/b801df6ef4552fe7a4acd00f70e7b0057dc62f87938a607d60d51710da9f7fbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a29c2feebbd8610e1d2eb2d058b4938d50b0c13 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/b801df6ef4552fe7a4acd00f70e7b0057dc62f87938a607d60d51710da9f7fbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4b0a9cb74b81063ab76ab659961be317add5ff279877aa0c13876a90f96c573 +size 60673 diff --git a/data/2025/2504_10xxx/2504.10356/images/ba1c71502e3d1b62d80b80df10464eeef839823bfd41082afe99302cc0c1fa68.jpg b/data/2025/2504_10xxx/2504.10356/images/ba1c71502e3d1b62d80b80df10464eeef839823bfd41082afe99302cc0c1fa68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46a9aef631aee0fece4423c8349c50c558ada19d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/ba1c71502e3d1b62d80b80df10464eeef839823bfd41082afe99302cc0c1fa68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:666d1da09545991e270d3e3ff60eb6522b198d0888302d20e0a5c23e406cf399 +size 29225 diff --git a/data/2025/2504_10xxx/2504.10356/images/bd414948b1fc64eac369d890930e1fd6fdc455be1d18c2df3bdd0cd54887fd29.jpg b/data/2025/2504_10xxx/2504.10356/images/bd414948b1fc64eac369d890930e1fd6fdc455be1d18c2df3bdd0cd54887fd29.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..8387c297a88eeceb3439afb11543b1ca083feb1d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/bd414948b1fc64eac369d890930e1fd6fdc455be1d18c2df3bdd0cd54887fd29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf5da394aef8c242e2ce849b2134a0b9af694922be4795b9fa19d5e3e08f5e0b +size 83910 diff --git a/data/2025/2504_10xxx/2504.10356/images/cc7d7f7d7a0ea73f1bd888448d909a38821f0cdcc30c4807be351475caeba83c.jpg b/data/2025/2504_10xxx/2504.10356/images/cc7d7f7d7a0ea73f1bd888448d909a38821f0cdcc30c4807be351475caeba83c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe563773ca41df3019a7c12d702d7a4868b634c4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/cc7d7f7d7a0ea73f1bd888448d909a38821f0cdcc30c4807be351475caeba83c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:daf02bd96a665a8fc471063972b8ea560e5eeacc2d5b395f4331919ce50cd753 +size 34509 diff --git a/data/2025/2504_10xxx/2504.10356/images/e5f1a6301f9eacb88d277334073c9d3bc06a6bceaf152447d2047710024fbe06.jpg b/data/2025/2504_10xxx/2504.10356/images/e5f1a6301f9eacb88d277334073c9d3bc06a6bceaf152447d2047710024fbe06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7748989afa14afdae7b89f0f8f3534424b426fd1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/images/e5f1a6301f9eacb88d277334073c9d3bc06a6bceaf152447d2047710024fbe06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dda71e0b598701aae1ff58219a8d96bf51bac172d43a1562a4e8fce74a51bf2 +size 33291 diff --git a/data/2025/2504_10xxx/2504.10356/images/fa2dfae75eef0a7bb838f0dfc25bd66e8d7acffe982633b0c359ac0cb43632f3.jpg b/data/2025/2504_10xxx/2504.10356/images/fa2dfae75eef0a7bb838f0dfc25bd66e8d7acffe982633b0c359ac0cb43632f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f1e07716f531abe0e97581a5a9f218f9fb25dc7 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10356/images/fa2dfae75eef0a7bb838f0dfc25bd66e8d7acffe982633b0c359ac0cb43632f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a707af3f34a9b12893a26ed1a140fdf262be0b944eb705b5dd201e741f06196 +size 228257 diff --git a/data/2025/2504_10xxx/2504.10356/layout.json b/data/2025/2504_10xxx/2504.10356/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..df28a9f9924091445a92e8b15b09496e4382a322 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10356/layout.json @@ -0,0 +1,10796 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 139, + 97, + 471, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 97, + 471, + 138 + ], + "spans": [ + { + "bbox": [ + 139, + 97, + 471, + 138 + ], + "type": "text", + "content": "MultiLoKo: a multilingual local knowledge benchmark for LLMs spanning 31 languages" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 219, + 178, + 390, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 178, + 390, + 190 + ], + "spans": [ + { + "bbox": [ + 219, + 178, + 390, + 190 + ], + "type": "text", + "content": "Dieuwke Hupkes* Nikolay Bogoychev*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 294, + 191, + 317, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 191, + 317, + 201 + ], + "spans": [ + { + "bbox": [ + 294, + 191, + 317, + 201 + ], + "type": "text", + "content": "Meta" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 223, + 201, + 388, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 201, + 388, + 213 + ], + "spans": [ + { + "bbox": [ + 223, + 201, + 388, + 213 + ], + "type": "text", + "content": "{dieuwkehupkes,nbogoych}@meta.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 281, + 241, + 329, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 241, + 329, + 253 + ], + "spans": [ + { + 
"bbox": [ + 281, + 241, + 329, + 253 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 265, + 470, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 265, + 470, + 506 + ], + "spans": [ + { + "bbox": [ + 140, + 265, + 470, + 506 + ], + "type": "text", + "content": "We present MultiLoKo, a new benchmark for evaluating multilinguality in LLMs covering 31 languages. MultiLoKo consists of three partitions: a main partition consisting of 500 questions per language, separately sourced to be locally relevant to the specific language, and two translated partitions, containing human-authored translations from 30 non-English languages to English and vice versa. For comparison, we also release corresponding machine-authored translations. The data is equally distributed over two splits: a dev split and a blind, out-of-distribution test split. MultiLoKo can be used to study a variety of questions regarding the multilinguality of LLMs as well as meta-questions about multilingual benchmark creation. We compute MultiLoKo scores for 11 base and chat models marketed to be multilingual and study their average performance, their performance parity across languages, how much their ability to answer questions depends on the question language, and which languages are most difficult. None of the models we studied performs well on MultiLoKo, as indicated by low average scores as well as large differences between the best and worst scoring languages. Furthermore, we find a substantial effect of the question language, indicating suboptimal knowledge transfer between languages. Lastly, we find that using local vs English-translated data can result in differences more than 20 points for the best performing models, drastically change the estimated difficulty of some languages. 
For using machines instead of human translations, we find a weaker effect on ordering of language difficulty, a larger difference in model rankings, and a substantial drop in estimated performance for all models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 525, + 190, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 190, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 190, + 537 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 548, + 504, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 638 + ], + "type": "text", + "content": "With the growing presence and deployment of LLMs across the world, evaluating their abilities in languages other than English becomes more and more eminent. Yet, studying and evaluating multilinguality in LLMs remains a challenging enterprise, and it is hardly exaggerated to call the current state of multilingual evaluation in LLMs insufficient. Older multilingual benchmarks such as PAWS-X (Zhang et al., 2019), XNLI (Conneau et al., 2018) or XCOPA (Ponti et al., 2020) often do not fit the demands for evaluating auto-regressive models and are rarely used to evaluate recent models. Furthermore, their coverage of languages is relatively small compared to the number of languages in which LLMs are intended to be proficient." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 639, + 506, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 506, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 506, + 706 + ], + "type": "text", + "content": "More often used for LLM evaluation are benchmarks translated from English, such as MGSM (translated GSM8K, Shi et al., 2023), MMMLU (tranlated MMLU, OpenAI, 2025) or (less frequently) Belebele (Bandarkar et al., 2024). 
These benchmarks provide good coverage over many languages, but using translated data comes with its own set of issues. One such issue is that even when human-rather than machine-authored translations are used, translated data is known to differ from native text in several ways (Clark et al., 2020). Furthermore, using translated benchmarks imposes a strong" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 209, + 37, + 560 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 560 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 560 + ], + "type": "text", + "content": "arXiv:2504.10356v2 [cs.CL] 15 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 712, + 195, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 712, + 195, + 723 + ], + "spans": [ + { + "bbox": [ + 117, + 712, + 195, + 723 + ], + "type": "text", + "content": "*Equal contributions" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "English-centric bias: translated data may be multilingual on the surface, it is not in its content. The benchmarks MLQA (Lewis et al., 2020) and TidyQA (Clark et al., 2020) to some extent address the issue by sourcing data separately for different languages. Even in their sourcing protocols, however, there is no explicit focus on selecting locally relevant content for the chosen languages. In addition to that, their coverage is again small compared to the above mentioned translated benchmarks." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "type": "text", + "content": "In response to these issues, we introduce a wide-coverage multilingual benchmark with locally-sourced questions for 31 different languages. Because the benchmark targets multilingual local knowledge, we dub it MultiLoKo. The release of MultiLoKo serves two interconnected goals:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 165, + 478, + 189 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 127, + 165, + 382, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 165, + 382, + 175 + ], + "spans": [ + { + "bbox": [ + 127, + 165, + 382, + 175 + ], + "type": "text", + "content": "1) Provide a better means to evaluate multilinguality in LLMs;" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 178, + 478, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 178, + 478, + 189 + ], + "spans": [ + { + "bbox": [ + 127, + 178, + 478, + 189 + ], + "type": "text", + "content": "2) Provide data to study the effect of various design choices in multilingual evaluation." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 191, + 506, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 506, + 301 + ], + "type": "text", + "content": "To address our first goal, we create 500 questions per language, written from scratch for each language, using a sourcing protocol specifically designed to ensure local relevance of the question topics. 
To also reap the benefits of parallel data, we commissioned both human and machine-authored translations for all non-English questions into English and vice versa, providing a total of 15500 parallel questions, sourced across the 31 languages in the benchmark. The translated data facilitates the study of transfer between languages and also serves our second goal. By comparing the English-translated data with the locally sourced data, we can explicitly compare the adequacy of using translated benchmarks; by comparing human- with machine-authored translations, we can better estimate the potential issues of the latter. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while test scores can only be obtained through an external provider.3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 304, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 369 + ], + "type": "text", + "content": "We provide elaborate analyses for both our goals. We compute average performance and language parity scores on the locally sourced data for 11 models marketed for their multilinguality (§ 5.1); we investigate whether these models exhibit knowledge transfer between different languages (§ 5.2); we study the impact of local sourcing versus translating on model rankings and language difficulty (§ 5.4.1); we analyse the difficulty of the included languages through various lenses (§ 5.3); and we conduct an analysis into the difference between human- and machine-authored translation (§ 5.4.3)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 371, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 504, + 449 + ], + "type": "text", + "content": "We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language. Llama 3.1 405B and GPT4-o are close contenders in terms of average scores (34.3 and 34.0, respectively), but both have substantially higher language gaps (39 and 49 points). Almost across the board, model performances are better when questions are asked in the language to which the content is relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low response-consistency across question language." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 450, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 506, + 582 + ], + "type": "text", + "content": "Next, we study the relevance of using locally sourced data as opposed to translated English data as well as whether it matters if translations are authored by humans or machines. We find that the estimated difficulty of some languages changes drastically across the two sourcing setups, within the range of 15 points decrease and 8 points increase on average across models. The rank correlation between average language difficulty score is 0.78. Furthermore, individual model scores between locally and English-translated data can differ up to 22 points for some languages. However, changing the sourcing setup does not impact model rankings, suggesting that using translated data may be suitable for comparing models but less for model development or language prioritisation. 
For using machine- instead of human-authored translations, as well, the effect on model ranking is limited " + }, + { + "bbox": [ + 104, + 450, + 506, + 582 + ], + "type": "inline_equation", + "content": "(R = 0.97)" + }, + { + "bbox": [ + 104, + 450, + 506, + 582 + ], + "type": "text", + "content": ", but the difficulty estimates of various languages changes with up to 12 points. Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to " + }, + { + "bbox": [ + 104, + 450, + 506, + 582 + ], + "type": "inline_equation", + "content": "34\\%" + }, + { + "bbox": [ + 104, + 450, + 506, + 582 + ], + "type": "text", + "content": " of the human-translated scores." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 587, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 587, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 587, + 506, + 654 + ], + "type": "text", + "content": "Outline In the remainder of this paper, we first describe our dataset collection protocol and the dataset itself in § 2 and § 3, respectively. In § 4, we describe our experimental setup. In § 5, we present a range of different results, covering (among other things), the summary of results described above. We conclude in § 6. As we discussed quite some related work above, we do not include a separate related work section in the main paper, but we do provide a discussion of a wider range of multilingual datasets in Appendix E." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 659, + 504, + 691 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 691 + ], + "type": "inline_equation", + "content": "{}^{2}" + }, + { + "bbox": [ + 104, + 659, + 504, + 691 + ], + "type": "text", + "content": " An exception to this is the benchmark EXAMS (Hardalov et al., 2020),which consists of exams separately sourced for each language. For reasons unknown to the authors of this work, it was never used for any prominent LLM release, with the exception of (Dubey et al., 2024), who deployed it for training rather than evaluation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 692, + 504, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 692, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 692, + 504, + 721 + ], + "type": "text", + "content": "3The MultiLoKo data, five few-shot examples per language, an evaluation script, a set of language-specific prompts, and information about test-score submissions can be found at https://github.com/facebookresearch/multiloko/." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 217, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 217, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 217, + 83 + ], + "type": "text", + "content": "2 Dataset collection" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 96, + 504, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 96, + 504, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 96, + 504, + 152 + ], + "type": "text", + "content": "The main data collection protocol of MultiLoKo is similar to the protocol used by the well-known benchmark SQuAD (Rajpurkar et al., 2016): we source articles from Wikipedia and ask annotators to generate questions about paragraphs sampled from these articles. After that, we run several rounds of quality control on the generated questions and commission human- and machine-authored translations of all data. Our collection protocol consists of five steps." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 158, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 158, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 158, + 506, + 236 + ], + "type": "text", + "content": "Step 1: Paragraph selection The first step in our protocol is the sampling of the 6K most visited Wikipedia pages for each language for the period of 2016-2021. We sample paragraphs from those pages by randomly selecting a word in the page and expanding left and right until we reach 3K characters. 
Next, we ask annotators to judge the local relevance of the samples on a scale from 1 to 5, where 1 refers to topics specific to the language (e.g. a Swedish singer not known outside of Sweden) and 5 to globally well-known topics (e.g. 'Youtube'). We disregard all topics that have a locality score above 3. The full rubric and annotation instructions can be found in Appendix D.1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "type": "text", + "content": "Step 2: Question generation In step 2, we ask native speakers to generate challenging questions about the content in the paragraphs. To facilitate automatic scoring, we ask that the questions are closed-form questions, with only one correct short answer. To ensure that the annotation instructions are understandable and appropriate for each locale and the questions of high quality, we run a pilot with 50 questions separately for each language. After our pilot, we commission 500 additional samples for each language, to leave a " + }, + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "type": "text", + "content": " margin to disregard questions in the rest of the process." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 316, + 504, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 372 + ], + "type": "text", + "content": "Step 3: Question review For each generated question, we ask a new set of annotators from a separate provider to judge whether the generated questions abide by the annotation instructions, to flag any possible issues, and to mark if the question is useable as is, would be useable with a small adaptation or should be disregarded. We ask annotators to fix small annotators on the spot, and as respective vendors that questions with larger issues are replaced." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 378, + 506, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 445 + ], + "type": "text", + "content": "Step 4: Question answering As a last quality control step, we ask two annotators different from the creator of the question to answer the questions. In this stage, we do not ask annotators to correct questions, but we simply disregard all questions for which either annotator thinks the original answer was incorrect, or the annotator provided an answer not matching the original answer because of ambiguities in the question. The only corrections we allow in this stage are additions of additional, semantically equivalent, correct answers (e.g. 'four' as an alternative to '4')." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 451, + 506, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 506, + 539 + ], + "type": "text", + "content": "Step 5: Translation Lastly, we translate the non-English data back to English and vice versa. This effort serves two purposes. 
First, it allows to study generalisation of knowledge and skills between English and non-English languages through a direct comparison of the same questions. Second, it facilitates inspection of the topics and questions for all languages of the dataset, without the need to be able to speak all those languages. As automatic translation of benchmarks is relatively common practice in the field (e.g. Li et al., 2024), we commission both human and machine translations and study their difference as part of our analysis. For the machine translations, we use Google Translate sentence based cloud API." + }, + { + "bbox": [ + 104, + 451, + 506, + 539 + ], + "type": "inline_equation", + "content": "^{4}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 555, + 243, + 568 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 243, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 243, + 568 + ], + "type": "text", + "content": "3 MultiLoKo the dataset" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "content": "MultiLoKo consists of three main components: i) the collected data; ii) a set of multilingual prompts to prompt base- and chat models; and iii) a set of metrics." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 617, + 209, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 209, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 209, + 628 + ], + "type": "text", + "content": "3.1 The collected data" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 638, + 377, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 377, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 377, + 651 + ], + "type": "text", + "content": "The data in MultiLoKo consists of several partitions and two splits." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 656, + 506, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 506, + 701 + ], + "type": "text", + "content": "Partitions MultiLoKo contains one main partition, containing locally-soured data for 31 languages, including English. In addition to that, it contains four translated partitions. 
Two of those are human-translated partitions: human-translated-from-english, consisting of human-authored translations of English data into the 30 other languages in MultiLoKo," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 117, + 710, + 304, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 710, + 304, + 722 + ], + "spans": [ + { + "bbox": [ + 117, + 710, + 304, + 722 + ], + "type": "text", + "content": "4https://cloud.google.com/translate?hl " + }, + { + "bbox": [ + 117, + 710, + 304, + 722 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 117, + 710, + 304, + 722 + ], + "type": "text", + "content": " en" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "type": "text", + "content": "human-translated-to-english containing human-authored translations of the non-English subsets into English. The other two are machine-translated partitions following the same pattern: machine-translated-from- english, contains machine-authored translations of English data into 30 other languages, and machine-translated-to- english contains machine-authored translations of the non-English subsets into English. All partitions contain 500 samples per language - thus in total 15500 samples in the main partition, and 15000 samples in the translated partitions. 
Statistics about the dataset such as the distribution over answer types and the average prompt length can be found in Appendix A. Results relating to the difficulty of the benchmark can be found in " + }, + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\S 5" + }, + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 167, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 277 + ], + "type": "text", + "content": "Splits Each partition is divided equally over two splits: a dev split that can be used for development, and a blind test split. Each of these splits thus contains 250 samples per language. Until the test split is publicly released, results can only be obtained through model submissions. The splits are not random, but constructed such that for each language the most frequently visited pages are in the dev split while the least frequently visited pages are in the test split, roughly preserving the distribution of answer types (e.g. number, name, year, etc). The test split can thus be seen as an out-of-distribution (ood) split, specifically meant to assess generalisation (which is challenging in the context of LLMs, see e.g. Hupkes et al., 2023). In § 5.4.2 we provide an analysis of the extent to which the split is truly an ood split, by analysing its difficulty. The results reported in the results section of the paper are dev results." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 291, + 266, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 291, + 266, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 266, + 303 + ], + "type": "text", + "content": "3.2 Prompts and few-shot examples" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 311, + 504, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 311, + 504, + 377 + ], + "spans": [ + { + "bbox": [ + 104, + 311, + 504, + 377 + ], + "type": "text", + "content": "Running MultiLoKo requires prompts. In the spirit of getting truly multilingually appropriate results, we design prompts separately for each language and release them along with the data. The prompts are written by different linguistic experts for the various languages, in consultation with the benchmark creators to ensure they are appropriate for LLMs. We provide prompts for base models and chat models that allow for incorporating up to five few-shot examples, which we also provide.6 All prompts and few-shot examples can be found in the MultiLoKo repository." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 391, + 163, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 163, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 163, + 402 + ], + "type": "text", + "content": "3.3 Metrics" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 411, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 456 + ], + "type": "text", + "content": "MultiLoKo has two main metrics and two auxiliary metrics. 
The two main metrics - Exact Match accuracy (EM) and Gap - capture the overall performance of MultiLoKo and are computed on the main partition, whereas the two auxiliary metrics - Mother Tongue Effect (MTE) and Locality Effect (LE) - combine information from different partitions. We provide a cheat-sheet in Table 1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 462, + 504, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 504, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 504, + 561 + ], + "type": "text", + "content": "EM and Gap EM indicates the performance of a model on a single language or averaged across languages, as measured by the percentage of times the model (after post-processing) provides an answer that verbatim matches one of the answers in the reference list. Gap, defined as the difference between the best and the worst performing language in the benchmark, is a measure of parity across the individual languages within the benchmark. Taken together, EM and Gap provide a good indication of how well a model is faring on MultiLoKo. Because both gap and EM are binary metrics that may be open to false negatives, we also considered the partial match metrics BLEU (Papineni et al., 2002), ChrF (Popovic, 2015) and contains. We did not find any novel patterns using those metrics, but include them in our implementation for future research." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 567, + 506, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 506, + 668 + ], + "type": "text", + "content": "MTE Because of the 2x2 design of MultiLoKo, in which we translated non-English data back to English and vice versa, we can compute several metrics related to locality of the requested information. MTE is one of such metrics. 
It expresses the impact of asking a question in a language to which that question is relevant. We quantify MTE (for non-English languages only), as the difference between the EM score of the locally sourced data asked in the corresponding language (e.g. asking a question about a local Bengali radio station in Bengali) and the EM score when the same questions are asked in English. A positive MTE indicates that information is more readily available when it is relevant to the language in which it was asked, whereas a negative MTE indicates that the information is more easily accessible in English. MTE is a measure related to transfer as well as language proficiency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 670, + 436, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 670, + 436, + 681 + ], + "spans": [ + { + "bbox": [ + 116, + 670, + 436, + 681 + ], + "type": "text", + "content": "5More details can be found at https://github.com/facebookresearch/multiloko/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 681, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 506, + 723 + ], + "type": "text", + "content": "6In several recent works it has been shown that prompts can have a substantial impact on model scores (e.g. Weber et al., 2023; Mizrahi et al., 2024). Given the large number of languages in the benchmark and the fact that those are not all mastered by the main authors, we did not include a systematic search through prompts, but presented our best-effort results." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 70, + 504, + 235 + ], + "blocks": [ + { + "bbox": [ + 108, + 70, + 504, + 235 + ], + "lines": [ + { + "bbox": [ + 108, + 70, + 504, + 235 + ], + "spans": [ + { + "bbox": [ + 108, + 70, + 504, + 235 + ], + "type": "table", + "html": "
Average EMThe first main metric we use to quantify performance for MultiLoKo is the average Exact Match score across languages, which expresses how many of the answers match one of the gold standard answers verbatim (after post-processing the answers).
GapThe second main metric is the gap between a model's best and worst performing language. We gap to quantify the extent to which a model has achieved parity across languages. Because a small gap can be achieved both through parity on high scores as parity on low scores, it is most informative in combination with average benchmark performance.
Mother tongue effect (MTE)MTE expresses the impact of asking questions in a language in which the requested information is locally salient, compared to asking it in English. A positive MTE indicates information is more readily available in the language it was (likely) present in the training data, whereas a negative mother tongue effect indicates the information is more easily accessible in English.
Locality effect (LE)LE quantifies the effect of using locally sourced vs translated data. It is measured by computing the difference between scores for locally sourced data and translated English-sourced data. A positive LE implies that using translated English data underestimates performance on a language, a negative LE that using translated English data overestimates performance.
", + "image_path": "8d54153ccb122aaea2874537a950dad799895f33acc86ca682873fd95442f949.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "lines": [ + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "text", + "content": "Table 1: MultiLoKo metric cheatsheet. We use several metrics to quantify model performance using MultiLoKo. This table provides a cheatsheet for their meaning." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "text", + "content": "LE The locality effect (LE) is a measure of how much performance on knowledge tasks is over- or underestimated through the use of using translated English data, as opposed to locally relevant data. We quantify the locality effect as the difference in EM for English translated data and locally sourced data. If for a language the English translated data has as a higher EM, the LE is positive, indicating that using English translated data likely overestimating a model's ability on providing knowledge for that language. If the LE is negative the English translated data may provide an underestimation of the score for that language. Note that because we often observe both positive and negative LEs for the 30 non-English languages in MultiLoKo, the average LE across languages may be small, even if the differences for individual languages may be large." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 409, + 228, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 409, + 228, + 424 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 228, + 424 + ], + "type": "text", + "content": "4 Experimental setup" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "type": "text", + "content": "We test and showcase our benchmark by running experiments with 11 different models of varying sizes, that were all marketed to have multilingual abilities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 467, + 163, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 467, + 163, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 163, + 479 + ], + "type": "text", + "content": "4.1 Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 487, + 504, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 555 + ], + "type": "text", + "content": "To test the extent to which MultiLoKo provides useful signal across training stages, we consider both base and chat models. The base models we include in our experiments are Llama 3.1 70B and 405B (Dubey et al., 2024), Mixtral 8x22B (team, 2024), and Qwen 2.5 72B (Qwen et al., 2025), the seven chat models are Gemini 2.0 Flash (Google DeepMind, 2024), GPT4-o (OpenAI et al., 2024), Claude 3.5 Sonnet (Anthropic, 2025), Llama 3.1 70B and 405B Chat, Mixtral 8x22B-it, and Qwen 2.5 72B instruct. As mentioned before, we run chat and base models with separate prompts." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 569, + 215, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 569, + 215, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 215, + 582 + ], + "type": "text", + "content": "4.2 Experimental setup" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 590, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 624 + ], + "type": "text", + "content": "We run all of our experiments with the generation temperature set to 0. To facilitate automatic evaluation, we include an instruction to answer questions curtly and precisely, producing only a number/name/location/etc. Full template information can be found in our github repository." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "type": "text", + "content": "Few-shot prompting For base models we use a 5-shot prompt. For chat models, we use a 0-shot prompt, as this is the most likely use mode by chat model users." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 656, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 506, + 723 + ], + "type": "text", + "content": "Post-processing Because base models are good at following the instructions, minimal postprocessing is needed: we only lowercase the output and strip punctuation. Chat models often deviate from the required format, especially in English, in various ways that we discuss in Appendix B. 
To evaluate such models beyond their instruction-following issues, we perform more complex post-processing, aiming to remove any words resembling \"answer\" from the LLM output, as well as several special cases for English and Japanese. We provide full details about post-processing in Appendix C." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 119, + 70, + 486, + 215 + ], + "blocks": [ + { + "bbox": [ + 119, + 70, + 486, + 215 + ], + "lines": [ + { + "bbox": [ + 119, + 70, + 486, + 215 + ], + "spans": [ + { + "bbox": [ + 119, + 70, + 486, + 215 + ], + "type": "table", + "html": "
ModelEMGapMother tongue effectLocality effect
Gemini 2.0 Flash34.39± 2.9034.806.12± 1.900.36± 3.40
Llama 3.1 405B34.31± 2.7039.206.37± 1.700.62± 2.70
GPT4-o33.97± 3.6048.803.08± 2.000.35± 2.90
Llama 3.1 405B Chat27.70± 3.2040.803.97± 2.20-1.11± 2.70
Llama 3.1 70B26.92± 2.6028.802.72± 1.70-0.30± 3.10
Claude 3.5 Sonnet26.89± 4.4047.6024.18± 4.200.81± 2.90
Llama 3.1 70B Chat21.65± 2.8042.400.49± 1.60-3.32± 3.30
Mixtral 8x22B21.64± 4.2043.60-2.18± 3.00-0.65± 2.60
Qwen2.5 72B19.66± 2.3028.402.45± 2.10-2.28± 2.70
Mixtral 8x22B-it10.10± 3.1039.20-5.41± 2.00-0.54± 1.70
Qwen2.5 72B instruct2.54± 0.708.00-1.52± 1.000.43± 0.70
", + "image_path": "187a2566712d7e7b5269313bad6b1bd1cade176fdb265f30224cbd55ff0b8bdc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 218, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 218, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 504, + 251 + ], + "type": "text", + "content": "Table 2: Aggregate results dev. We report average EM, gap, mother tongue effect and locality effect for all 11 models on the MultiLoKo dev split. For EM, MTE and LE, we also indicate a confidence interval equal to two times the standard error across languages. Models are sorted by average EM." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 277, + 164, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 164, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 164, + 290 + ], + "type": "text", + "content": "5 Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 302, + 504, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 504, + 401 + ], + "type": "text", + "content": "As MultiLoKo has several partitions, there are many different results that can be computed. On a high level, we consider four different types of results. First, in § 5.1, we report average model results across several categories, including the average performance and an indicator of parity across languages. Next, in § 5.2, we dive deeper into the knowledge transfer occurring from one language to another, within individual models. In § 5.3, instead, we focus on differences between individual languages. 
Lastly, in § 5.4, we look in more detail at the dataset itself through the lens of model results, considering in particular the effect of locally sourcing data as opposed to translating English sourced data (§ 5.4.1), differences between our dev and test split (§ 5.4.2) and the difference between using human and machine translated data (§ 5.4.3)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 414, + 305, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 414, + 305, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 305, + 426 + ], + "type": "text", + "content": "5.1 Aggregate results: EM and language gap" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 434, + 504, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 490 + ], + "type": "text", + "content": "In Table 2, we provide a summary of the average dev results. Specifically, for each model, we report average EM and the gap between the best and the worst language, along with average MTE and LE, which we will discuss in a later section.7 We report average MTE, EM and LE along with a confidence interval equal to two times the standard error across languages, roughly equalling previously used " + }, + { + "bbox": [ + 104, + 434, + 504, + 490 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 434, + 504, + 490 + ], + "type": "text", + "content": " confidence intervals (Madaan et al., 2024; Dubey et al., 2024)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 501, + 248, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 501, + 248, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 248, + 513 + ], + "type": "text", + "content": "5.1.1 Model performance (EM)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 519, + 506, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 506, + 640 + ], + "type": "text", + "content": "In Figure 1a, we show a boxplot of the distribution of the EM scores across models, ordered by average EM. The best performing models are Gemini 2.0 Flash, Llama 3.1 405B, and GPT4-o, while Mixtras 8x22B and the Qwen2.5 72B populate the lower rankings on the list. Somewhat surprisingly, base models are generally outperforming chat models on the benchmark, this is partly due to false refusals and poor instruction following in the chat models. In some cases, however, the chat models simply just provide a qualitatively different answer than the base models. The figure shows that MultiLoKo is a relatively difficult benchmark across the board: the average EM of even the best performing model barely exceeds 30, while the bottom performing models have EM scores lower than 20. Also scores for the easiest languages (see also § 5.3) are capped below 50. Furthermore, for virtually all models performance varies starkly between languages, suggesting that none of the models we considered are evenly multilingual across the 31 languages in MultiLoKo." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 651, + 157, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 157, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 157, + 663 + ], + "type": "text", + "content": "5.1.2 Gap" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "type": "text", + "content": "While average EM score provides some information about a model's multilingual abilities, the same EM score can hide many different patterns regarding individual language scores. As we appreciate it is not always practical to consider 31 separate EM scores in model development, we add a second" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 710, + 283, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 283, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 283, + 721 + ], + "type": "text", + "content": "7A metric cheatsheet can be found in Table 1." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 71, + 321, + 223 + ], + "blocks": [ + { + "bbox": [ + 109, + 71, + 321, + 223 + ], + "lines": [ + { + "bbox": [ + 109, + 71, + 321, + 223 + ], + "spans": [ + { + "bbox": [ + 109, + 71, + 321, + 223 + ], + "type": "image", + "image_path": "ba1c71502e3d1b62d80b80df10464eeef839823bfd41082afe99302cc0c1fa68.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 227, + 240, + 237 + ], + "lines": [ + { + "bbox": [ + 187, + 227, + 240, + 237 + ], + "spans": [ + { + "bbox": [ + 187, + 227, + 240, + 237 + ], + "type": "text", + "content": "(a) EM scores" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 325, + 73, + 504, + 223 + ], + "blocks": [ + { + "bbox": [ + 325, + 73, + 504, + 223 + ], + "lines": [ + { + "bbox": [ + 325, + 73, + 504, + 223 + ], + "spans": [ + { + "bbox": [ + 325, + 73, + 504, + 223 + ], + "type": "image", + "image_path": "cc7d7f7d7a0ea73f1bd888448d909a38821f0cdcc30c4807be351475caeba83c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 399, + 227, + 429, + 238 + ], + "lines": [ + { + "bbox": [ + 399, + 227, + 429, + 238 + ], + "spans": [ + { + "bbox": [ + 399, + 227, + 429, + 238 + ], + "type": "text", + "content": "(b) Gap" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 243, + 504, + 268 + ], + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 
243, + 504, + 268 + ], + "type": "text", + "content": "Figure 1: EM distributions and Gap dev. (a) Boxplot of observed EM scores for each model, sorted by mean. (b) Difference between the best EM and the worst of the N next best EM scores, per model." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 287, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 504, + 312 + ], + "type": "text", + "content": "summary metric to the main metrics of MultiLoKo: the gap between the best and worst performing languages, representative of the extent to which a model has achieved parity across languages." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 315, + 506, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 506, + 426 + ], + "type": "text", + "content": "In Figure 1a, we already saw that the per-language scores have quite a range for all models. In Figure 1b, we study this in more detail, by considering the gap between the best language and the next N best language (30 corresponds to the full benchmark). On the right end of the plot, we see that already considering only 5 languages besides English, even the best perform has a gap of over five points - relatively large in absolute terms, very large in relative ones - between English and the worst of the remaining languages. For the second best two models, the top-5 gap even exceeds 10 points. As we include more languages, up to the full benchmark, the gap increases, with GPT4-0 showing gap of almost 50 points. 
The only models for which the gap is small are the models that have overall low performance and thus little space to drop from English, illustrating how gap and average EM provide complementary information about multilingual performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 437, + 266, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 266, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 266, + 449 + ], + "type": "text", + "content": "5.2 Generalisation across languages" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 457, + 506, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 506, + 480 + ], + "type": "text", + "content": "Next, we study whether knowledge generalises across languages or, in other words, whether knowledge transfers from one language to another." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 491, + 277, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 277, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 277, + 502 + ], + "type": "text", + "content": "5.2.1 The mother tongue effect (MTE)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "content": "First, we compare the EM of models when questions are asked in the language for which the questions were originally sourced with performance when the same questions are asked in English. We quantify this effect with the metric MTE, which expresses the difference in performance between these two settings (see § 3.3). In Figure 2a, we show MTE per language, averaged across models. 
For most languages, performance is higher when the question is asked in the language for which the question is locally relevant. The languages for which MTE is negative or close to 0 are virtually all languages that perform very poorly also in the mother tongue and for which there is therefore little room for further decrease. From one perspective, the improvements when questions are asked in the low-resource but native languages can be seen as surprising: as models perform much better in English than non-English languages, one may expect performances to go up as a consequence of that. On the other hand, similar 'mother tongue effects' have been observed in earlier studies. For example, Ohmer et al. (2024) found that models are comparatively better at answering factual questions about topics when they are asked in a language to which culture the fact pertains. It appears that also in our case," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 660, + 504, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 660, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 660, + 504, + 700 + ], + "type": "text", + "content": "With the used prompt, we could not get Claude 3.5 Sonnet to answer questions in English in an automatically parsable manner, leading to an abysmal score of 4.8 on the English sourced data and equally low scores on data translated into English. Examples of this issue can be found in Appendix B. Because this issue is not indicative of lack of knowledge or transfer, we excluded Claude 3.5 Sonnet from any of the transfer results in this section." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "text", + "content": "The exception to this rule is Hindi, which has a reasonable performance in the native language but nevertheless improves in English. We further discuss such language-specific points in " + }, + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "inline_equation", + "content": "\\S 5.3" + }, + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 73, + 362, + 212 + ], + "blocks": [ + { + "bbox": [ + 108, + 73, + 362, + 212 + ], + "lines": [ + { + "bbox": [ + 108, + 73, + 362, + 212 + ], + "spans": [ + { + "bbox": [ + 108, + 73, + 362, + 212 + ], + "type": "image", + "image_path": "5e9488ee42bcffe9751990decbf76ca92024b4aaa74c5ac9b05c6f80f75e0642.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 177, + 216, + 294, + 226 + ], + "lines": [ + { + "bbox": [ + 177, + 216, + 294, + 226 + ], + "spans": [ + { + "bbox": [ + 177, + 216, + 294, + 226 + ], + "type": "text", + "content": "(a) Average MTE across models" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 386, + 215, + 474, + 225 + ], + "lines": [ + { + "bbox": [ + 386, + 215, + 474, + 225 + ], + "spans": [ + { + "bbox": [ + 386, + 215, + 474, + 225 + ], + "type": "text", + "content": "(b) 
KDE of MTE scores" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 371, + 76, + 492, + 188 + ], + "blocks": [ + { + "bbox": [ + 371, + 76, + 492, + 188 + ], + "lines": [ + { + "bbox": [ + 371, + 76, + 492, + 188 + ], + "spans": [ + { + "bbox": [ + 371, + 76, + 492, + 188 + ], + "type": "image", + "image_path": "02b5749be8a0b0cad9681d40427e51c88c0bc0e81cda587783d142cb140bf240.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 232, + 504, + 277 + ], + "lines": [ + { + "bbox": [ + 104, + 232, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 504, + 277 + ], + "type": "text", + "content": "Figure 2: Mother tongue effect dev. (a) Per language MTE for MultiLoKo dev, indicating the difference between questions asked in the mother tongue (locally relevant) and in English. Error bars indicate 2 times standard error across all models, excluding Claude 3.5 Sonnet. (b) KDE plot of the distribution of MTE scores for the top-3 performing models." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 301, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 504, + 335 + ], + "type": "text", + "content": "the effect of accessibility of information in a relevant language wins out over the generally stronger English performance, pointing to a gap in models' ability to generalise knowledge from one language to another." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "text", + "content": "In Figure 2b, we further consider the distribution of MTE scores for the top-3 models. Interestingly, this distribution is quite different between models. Despite having comparable average scores, the top-3 performing models differ in their MTE distributions across languages. Of the three models, GPT4-o has the smallest average effect (3.2); Llama 3.1 405B has a much higher average effect (6.6), but less probability mass on the more extreme ranges of the spectrum (min max values of " + }, + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "inline_equation", + "content": "[-7, +12]" + }, + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "text", + "content": " vs " + }, + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "inline_equation", + "content": "[-9, +13]" + }, + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "text", + "content": ") Gemini 2.0 Flash is in the middle in terms of average (6.3), but shows the largest variation across languages " + }, + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "inline_equation", + "content": "[-10, +16]" + }, + { + "bbox": [ + 104, + 340, + 506, + 418 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "type": "text", + "content": "Note, however, that without studying the actual training data of the various models, it is possible to infer that all these models have relatively poor transfer across languages, but not conclusively say that one model is better than another: it is also possible that the information sourced for languages with better MTEs was simply better represented in the English data of a respective model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 483, + 261, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 261, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 261, + 495 + ], + "type": "text", + "content": "5.2.2 Consistency across responses" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 503, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 504, + 581 + ], + "type": "text", + "content": "Another way to study transfer between languages is to look at the consistency of responses across languages (previously also used by Qi et al., 2023; Ohmer et al., 2023, i.a.). After all, it is possible for a model that has an EM of 30 on both English and another language to be nevertheless completely misaligned on which questions they respond to correctly. Studying consistency across responses can therefore be seen as a more direct way of studying whether knowledge is equally accessible across languages. Furthermore, consistency can be studied independently from accuracy, as it is possible for a model to have very good transfer, but be simply consistently wrong." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 585, + 506, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 506, + 685 + ], + "type": "text", + "content": "In the dataset used by Ohmer et al. (2023), the correct answers (consisting of names, numbers and years) are identical across the languages they consider, while Qi et al. (2023) use a factual knowledge task that requires ranking outputs. Neither of their metrics can thus be directly applied in our case. Specifically, measuring consistency on incorrect responses – an important component of the work of Ohmer et al. (2023) because it can provide positive rather than negative evidence – would require assessing whether two answers in different languages are to be considered semantically equivalent, which is not practically feasible for our data. Rather, we opt for a simpler consistency metric, which quantifies what percentage of the questions that are answered correctly in either language are answered correctly in both languages." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "In Figure 3a, we show the average consistency of all models (excluding again Claude Sonnet 3.5); for completeness, we also show the per-language consistency results in Figure 3b. 
The results confirm our earlier conclusion that much improvements can be made when it comes to knowledge transfer" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 74, + 239, + 180 + ], + "blocks": [ + { + "bbox": [ + 108, + 74, + 239, + 180 + ], + "lines": [ + { + "bbox": [ + 108, + 74, + 239, + 180 + ], + "spans": [ + { + "bbox": [ + 108, + 74, + 239, + 180 + ], + "type": "table", + "html": "
ModelConsistency
Gemini 2.0 Flash0.46± 0.04
Llama 3.1 405B0.46± 0.04
Llama 3.1 70B0.45± 0.03
GPT4-o0.45± 0.05
Llama 3.1 405B Chat0.42± 0.04
Qwen2.5 72B0.40± 0.04
Llama 3.1 70B Chat0.40± 0.04
Mixtral 8x22B0.36± 0.05
Mixtral 8x22B-it0.21± 0.05
Qwen2.5 72B instruct0.08± 0.03
", + "image_path": "b0918ad445a1cd7b34ff620e97c14ae4d7e0eab4778d0e71993ce777ae50542b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 113, + 213, + 235, + 224 + ], + "lines": [ + { + "bbox": [ + 113, + 213, + 235, + 224 + ], + "spans": [ + { + "bbox": [ + 113, + 213, + 235, + 224 + ], + "type": "text", + "content": "(a) Consistency scores per model" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 247, + 73, + 502, + 210 + ], + "blocks": [ + { + "bbox": [ + 247, + 73, + 502, + 210 + ], + "lines": [ + { + "bbox": [ + 247, + 73, + 502, + 210 + ], + "spans": [ + { + "bbox": [ + 247, + 73, + 502, + 210 + ], + "type": "image", + "image_path": "4292a85211fc2019ae5f9eb0d70959896e6e3c18041a561b5072bafe1218ace8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 213, + 440, + 224 + ], + "lines": [ + { + "bbox": [ + 309, + 213, + 440, + 224 + ], + "spans": [ + { + "bbox": [ + 309, + 213, + 440, + 224 + ], + "type": "text", + "content": "(b) Consistency scores per language" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "type": "text", + "content": "Figure 3: Consistency results dev. (a) Average per-model consistency scores, " + }, + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "type": "inline_equation", + "content": "\\pm 2" + }, + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "type": "text", + "content": " times the standard error across languages. (b) Boxplot of model consistency scores per language, indicating the relative overlap of correctly answered questions when asked in the mother tongue vs in English." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 285, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 504, + 309 + ], + "type": "text", + "content": "between languages: even for the best performing models, there is an overlap of not even " + }, + { + "bbox": [ + 104, + 285, + 504, + 309 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 285, + 504, + 309 + ], + "type": "text", + "content": " between the questions correctly answered across languages." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 321, + 261, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 321, + 261, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 261, + 333 + ], + "type": "text", + "content": "5.3 Differences between languages" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 341, + 504, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 504, + 364 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 504, + 364 + ], + "type": "text", + "content": "So far, with the exception of MTE and parity scores, we have primarily looked at results averaged across languages. Now, we consider language-specific results in a bit more detail." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 108, + 376, + 501, + 608 + ], + "blocks": [ + { + "bbox": [ + 108, + 376, + 501, + 608 + ], + "lines": [ + { + "bbox": [ + 108, + 376, + 501, + 608 + ], + "spans": [ + { + "bbox": [ + 108, + 376, + 501, + 608 + ], + "type": "image", + "image_path": "5a773333e4ab35574de8f34ba1e9ec4e4d7fb66080d19020a6de0b49c9e62b74.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "type": "text", + "content": "Figure 4: Average EM per language dev, in mother tongue and English. Top: Average EM on locally sourced data. Bottom: Average EM on locally sourced data, translated to English." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 658, + 322, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 322, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 322, + 671 + ], + "type": "text", + "content": "5.3.1 Language difficulty on locally sourced data" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "First, in Figure 4 (top), we show average model results for all languages on all locally sourced data. In broad strokes, the order of difficulty is correlated with how low- or high- resource a language is to be considered: while languages such as French, English and Spanish occur at the easier end of the spectrum, we find Farsi, Khmer and Malay among the most difficult languages. 
There are a few" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 114, + 72, + 355, + 205 + ], + "blocks": [ + { + "bbox": [ + 114, + 72, + 355, + 205 + ], + "lines": [ + { + "bbox": [ + 114, + 72, + 355, + 205 + ], + "spans": [ + { + "bbox": [ + 114, + 72, + 355, + 205 + ], + "type": "image", + "image_path": "e5f1a6301f9eacb88d277334073c9d3bc06a6bceaf152447d2047710024fbe06.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 206, + 293, + 217 + ], + "lines": [ + { + "bbox": [ + 179, + 206, + 293, + 217 + ], + "spans": [ + { + "bbox": [ + 179, + 206, + 293, + 217 + ], + "type": "text", + "content": "(a) Locality effect per language" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 222, + 504, + 277 + ], + "lines": [ + { + "bbox": [ + 104, + 222, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 504, + 277 + ], + "type": "text", + "content": "Figure 5: Locality Effect dev. (a) Per language Locality Effect, indicating the difference in assigned scores between locally sourced and translated English data. A positive LE means the locally sourced data has a higher score (is easier), a negative LE the English sourced data has a higher score. (b) Per-model rank correlation between language difficulty of languages on locally sourced vs English translated data." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 369, + 76, + 488, + 173 + ], + "blocks": [ + { + "bbox": [ + 369, + 76, + 488, + 173 + ], + "lines": [ + { + "bbox": [ + 369, + 76, + 488, + 173 + ], + "spans": [ + { + "bbox": [ + 369, + 76, + 488, + 173 + ], + "type": "table", + "html": "
ModelRank correlation language difficulty
Gemini 2.0 Flash0.54
Llama 3.1 405B0.65
GPT4-o0.64
Llama 3.1 405B Chat0.70
Llama 3.1 70B0.60
Claude 3.5 Sonnet0.84
Llama 3.1 70B Chat0.68
Mixtral 8x22B0.86
Qwen2.5 72B0.45
Mixtral 8x22B-it0.88
Qwen2.5 72B instruct0.55
", + "image_path": "5d86d006a27cdf31406eb72a1b910b6a9d13150c13b3ee9064c1db294d2d41f2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 365, + 205, + 494, + 217 + ], + "lines": [ + { + "bbox": [ + 365, + 205, + 494, + 217 + ], + "spans": [ + { + "bbox": [ + 365, + 205, + 494, + 217 + ], + "type": "text", + "content": "(b) Language difficulty correlations" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 300, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 504, + 344 + ], + "type": "text", + "content": "notable exceptions: on average the second highest scoring language in our benchmark is Tagalog. While it is difficult to judge why without doing a detailed analysis on the questions, we hypothesise that the questions asked by the Tagalog language experts are simply less complex than the questions of other languages." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 356, + 379, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 379, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 379, + 369 + ], + "type": "text", + "content": "5.3.2 Separating language difficulty from language proficiency" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "text", + "content": "In an attempt to distinguish data difficulty from language proficiency, we consider also the difficulty of the locally sourced data translated to English. 
While this conflates data difficulty and transfer (see § 5.2), it still gives us some indication of the extent to which low performance in languages is caused by poor language proficiency versus data difficulty. In the bottom half of Figure 4, we show the model performances as computed on the locally sourced data translated to English. The correlation between these two language difficulty rankings between these setups is 0.79. When comparing the ranks of the various languages, only a handful of languages shift more than a few places. Specifically, Bengali " + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "inline_equation", + "content": "(26->4)" + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "text", + "content": ", Urdu " + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "inline_equation", + "content": "(26->12)" + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "text", + "content": ", and Hindi " + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "inline_equation", + "content": "(14->5)" + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "text", + "content": " all decrease substantially in difficulty rank. The fact that they are comparatively easier in English suggests that for those languages proficiency may be a larger problem than data difficulty. On the other hand, only Russian " + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "inline_equation", + "content": "(7->21)" + }, + { + "bbox": [ + 104, + 375, + 506, + 496 + ], + "type": "text", + "content": " shows a drop of more than 5 places." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 509, + 180, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 509, + 180, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 180, + 520 + ], + "type": "text", + "content": "5.4 The dataset" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 529, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 504, + 574 + ], + "type": "text", + "content": "Lastly, we discuss two aspects related to the creation of the dataset. Specifically, we consider the impact of local sourcing vs translated English data, and we have a look at the dataset split across dev and test. We consider the difference between using human-authored as opposed to machine-authored translations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 586, + 344, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 344, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 344, + 597 + ], + "type": "text", + "content": "5.4.1 Locally-sourced vs translated-from-English data" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "text", + "content": "To study the impact of using locally sourced data, we consider the difference between per-language EM on locally sourced data and translated from English data." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 639, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 506, + 696 + ], + "type": "text", + "content": "Language difficulty First, we look at per-language differences between locally sourced and translated English data. We quantify this difference in a metric we call the Locality Effect (LE). The size of the locality effect tells us how much the estimate of a model's strength in a particular language would have been off if we had chosen to use a translated benchmark rather than a locally sourced one. We plot this difference in Figure 5a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": "As we can see, the scores between locally and translated English-sourced data can differ quite drastically, almost 15 percentage points averaged across models. For individual models, the differences are" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "even larger. For Llama 3.1 405B, the locality effect ranges from -13 to +17; for Gemini 2.0 Flash from -21 to +15; and for GPT4-o from -22 to +14. 
The differences are not just in absolute scores; also the ordering of language by difficulty is quite different across the two data collection setups, as can be seen by the per-model rank correlations of language difficulty between the two conditions, shown in Figure 5b. Using English-translated rather than locally sourced data does thus not only provide different estimates, but may suggest different languages to focus on for improvement." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": "Model rankings Next, we consider the ranking of the models under the two different data regimes. Interestingly, given the transfer effect, changing from locally to English translated data does not make any difference in the ranking. Also in terms of absolute scores, the difference between the two data collection setups is relatively minor. At least for our type of data, it thus appears that using translated data as opposed to locally sourced data may be a reasonable setup for comparing models on average, though not for getting adequate per-language or set language prioritisation." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 227, + 209, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 227, + 209, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 209, + 239 + ], + "type": "text", + "content": "5.4.2 The dataset split" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 246, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 246, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 504, + 344 + ], + "type": "text", + "content": "As mentioned in the dataset construction, we took the deliberate decision to generate a split based on topic frequency, rather than creating a random split. The aim of this out-of-distribution split is to test generalisation to topics that are more in the tail of the distribution, as well as encourage improvements in multilinguality beyond having a higher score on the specific released MultiLoKo dev set. Of course, however, because of our sourcing method, all the topics in MultiLoKo are topics on which information is available on Wikipedia. As training data, Wikipedia is often packaged as a single scrape, this may render our deliberate splitting efforts futile: the fact that a page is less visited does not make it less likely that the specific page is included in the training data. Now, we test if the dev and test split are in fact distributionally different." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 111, + 357, + 501, + 541 + ], + "blocks": [ + { + "bbox": [ + 111, + 357, + 501, + 541 + ], + "lines": [ + { + "bbox": [ + 111, + 357, + 501, + 541 + ], + "spans": [ + { + "bbox": [ + 111, + 357, + 501, + 541 + ], + "type": "image", + "image_path": "b801df6ef4552fe7a4acd00f70e7b0057dc62f87938a607d60d51710da9f7fbd.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 551, + 504, + 584 + ], + "lines": [ + { + "bbox": [ + 104, + 551, + 504, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 551, + 504, + 584 + ], + "type": "text", + "content": "Figure 6: Average EM, dev versus test. We show the difference in score distributions between the MultiLoKo dev and test set. The results confirm that the test set is indeed out of distribution with respect to the dev set: dev scores (upper bars) are higher across the board." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "type": "text", + "content": "In Figure 6, we show boxplots of dev and test EM scores for all models under consideration. The plot confirms that the split is indeed to be considered an OOD split: for virtually much all models, the test scores are lower than the dev scores. Across all models, the average dev score is 24, whereas the average test score is 21. This suggests that our test set does indeed contain more tail knowledge than the dev set, despite the aforementioned arguments regarding Wikipedia. Interestingly, this implies that Wikipedia may not be the primary source from which models learn this information." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "content": "The difference in difficulty also has bearing on the other metrics: the parity scores (thus: the gap between the best and worst performing language) is 37 for dev vs 34 for test, suggesting that more difficult dat may to some extent hide differences between languages and therefore exemplifying the utility of considering parity along with overall performance. The mother tongue effect, on the other hand, is comparable across dev and test (1.61 vs 1.56, respectively). For the locality effect, the" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 105 + ], + "type": "text", + "content": "effect is less interpretable. While the average difference is substantial (-0.6 dev vs -1.9 test), there is no clear pattern discernable across languages: for some, the effect reduces, whereas for others it increases." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 110, + 125, + 248, + 205 + ], + "blocks": [ + { + "bbox": [ + 110, + 125, + 248, + 205 + ], + "lines": [ + { + "bbox": [ + 110, + 125, + 248, + 205 + ], + "spans": [ + { + "bbox": [ + 110, + 125, + 248, + 205 + ], + "type": "table", + "html": "
ModelRmin Δmax Δavg Δ
Gemini 2.0 Flash0.80-10.0021.604.35
Llama 3.1 405B0.83-4.4018.805.82
GPT4-o0.85-6.0021.604.46
Llama 3.1 405B Chat0.80-10.4022.403.08
Llama 3.1 70B0.77-7.6022.004.59
Claude 3.5 Sonnet0.90-9.6020.802.84
Llama 3.1 70B Chat0.87-6.0020.003.12
Mixtral 8x22B0.91-3.2020.004.13
Qwen2.5 72B0.83-4.0016.803.47
Mixtral 8x22B-it0.92-4.8012.402.41
Qwen2.5 72B instruct0.80-0.803.200.36
", + "image_path": "07d531440daa7d359f135908c66b8f86aff2111a8ae67e0aa29fd27c7f2f0eeb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 263, + 118, + 487, + 240 + ], + "blocks": [ + { + "bbox": [ + 263, + 118, + 487, + 240 + ], + "lines": [ + { + "bbox": [ + 263, + 118, + 487, + 240 + ], + "spans": [ + { + "bbox": [ + 263, + 118, + 487, + 240 + ], + "type": "image", + "image_path": "58b5ac41956d6eb877cd3c5fb7488a56cf7367f079130be27c34bff2cd0898d3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 248, + 429, + 258 + ], + "lines": [ + { + "bbox": [ + 320, + 248, + 429, + 258 + ], + "spans": [ + { + "bbox": [ + 320, + 248, + 429, + 258 + ], + "type": "text", + "content": "(b) MT vs human translations" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 265, + 504, + 310 + ], + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 310 + ], + "type": "text", + "content": "Figure 7: Machine versus human translations dev. (a) Per-model rank correlation between language difficulty between MT and human translations, and min, max and average difference between the two conditions. (b) Difference between EM computed on human- and machine-translated data (human score - machine score), per language." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 238, + 252, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 238, + 252, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 252, + 258 + ], + "type": "text", + "content": "(a) Language difficulty stats across human- and machine translations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 330, + 288, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 330, + 288, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 288, + 340 + ], + "type": "text", + "content": "5.4.3 Human versus machine translation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": "Lastly, we consider the impact of using machine- or human-authored translations. To do so, we look at the differences in EM scores between machine and human translated data for the various languages, taking the human translations as the 'gold standard' (i.e. we consider human translated EM - machine translated EM). We show the results in Figure 7." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 397, + 505, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 505, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 505, + 485 + ], + "type": "text", + "content": "In Figure 7a we show the rank correlations of the difficulties of the various languages per model, as well as the min, max and average drop from human to machine translations. 
We see the that, at the model level, using machine translations rather than human translations results in a systematic undervaluation of the model scores: there is not a single model for which the 'drop' from human to machine translations is negative on average. In part, this is may be a result of the previously observed lack of knowledge transfer effect. That the drop is not substantially lower for models with better transfer, however, suggests that the more impactful factor is the quality of the machine translations, that may at times result in unanswerable questions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 491, + 504, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 504, + 545 + ], + "type": "text", + "content": "In terms of model rankings, the difference between machine and human translations is minor: the model rankings between the two conditions have a rank correlation of 0.97 on the dev split, with only three local swaps (2&3 and 5&6 and 8&9) of models that did not have statistically different scores to begin with. This suggests that to compare models, using machine translation can be an acceptable alternative to human translations, as the mis-estimation is systematic across models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 550, + 505, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 505, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 505, + 616 + ], + "type": "text", + "content": "Considering the effect across languages, we observe that even though the average drop is positive, for virtually all models there are at least some languages for which performance increases when MT is used, in some cases with even more than 10 points. For a handful of languages - specifically Russian, Swedish and Urdu - this is also true across models (see Figure 7b). 
While the overall rank correlation is high for language difficulty (0.88), it thus still urges caution in using machine translated data for language improvement prioritisation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 632, + 184, + 644 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 184, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 184, + 644 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "type": "text", + "content": "Notwithstanding the increasing multinational deployment of LLMs in many parts of the world, adequately evaluating their multilinguality remains a challenging enterprise. Only in part is this due to the scarcity of high-quality and broad-coverage multilingual benchmarks for LLM: perhaps a more pressing issue is that the benchmarks that are frequently used for multilingual evaluation virtually all consist of translated English data. 
While using completely parallel data has its advantages, using translated English data imposes an English-centric bias on the content of the benchmarks," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "content": "implying that even if the benchmark evaluates multilinguality on the surface, it does not in content. In our work, we aim to address this by presenting MultiLoKo, a multilingual benchmark spanning 31 languages that combines the best of both worlds. MultiLoKo contains 500 questions targeting locally relevant knowledge for 31 languages, separately sourced for each language with a protocol specifically designed to ensure local relevance of the question topics. It is also fully parallel, because it contains human-authored translations of the non-English partitions into English and vice versa. As such, it allows to study various questions related to multilinguality, transfer and multilingual benchmark creation. To prevent quick overfitting and inadvertent contamination, we release a development set of the benchmark, while the test set of the benchmarks remains private, at least for the near future." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 176, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 176, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 176, + 506, + 266 + ], + "type": "text", + "content": "We use MultiLoKo to analyse 4 base and 7 chat models marketed to be multilingual. We find that, of the models we consider, the best performing model is Gemini 2.0 Flash, with an average performance of 34.4 points, and an almost 35 point gap between the best and the worst language, followed by Llama 3.1 405B and GPT4-o, which are close contenders in terms of average performance but both have substantially higher language gaps (39 and 49 points). Generally, scores are better when questions are asked in the language to which they are relevant, indicating suboptimal knowledge transfer between languages, a result that is mirrored by low per-sample consistency across question language." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 269, + 506, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 506, + 400 + ], + "type": "text", + "content": "On a meta-level, we study the relevance of using locally sourced data as opposed to translated English data as well as whether it matters if translations are authored by humans or machines. We find that the estimated difficulty of some languages changes drastically across the two sourcing setups, within the range of 15 points decrease and 8 points increase on average across models. The rank correlation between average language difficulty score is 0.78. Furthermore, individual model scores between locally and English-translated data can differ up to 22 points for some languages. 
However, changing the sourcing setup does not impact model rankings, suggesting that using translated data may be suitable for comparing models but less for model development or language prioritisation. For using machine- instead of human-authored translations, as well, the effect on model ranking is limited " + }, + { + "bbox": [ + 104, + 269, + 506, + 400 + ], + "type": "inline_equation", + "content": "(\\mathrm{R} = 0.97)" + }, + { + "bbox": [ + 104, + 269, + 506, + 400 + ], + "type": "text", + "content": ", but the difficulty estimates of various languages changes with up to 12 points. Furthermore, using machine translated data results in lower average scores for all models, with drops ranging from 2 to " + }, + { + "bbox": [ + 104, + 269, + 506, + 400 + ], + "type": "inline_equation", + "content": "34\\%" + }, + { + "bbox": [ + 104, + 269, + 506, + 400 + ], + "type": "text", + "content": " of the human-translated scores." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 405, + 506, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 405, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 405, + 506, + 495 + ], + "type": "text", + "content": "While our results section is extensive already, there are still several parts of MultiLoKo that we did not explore. For instance, because of the sourcing strategy, each native question is coupled with a paragraph that contains the answer to the question. MultiLoKo could thus be transformed into a reading-comprehension benchmark, and we consider studying the difference between the knowledge and reading comprehension setup an interesting direction for future work. Furthermore, each question contains an elaborate long answer intended to explain the short answer. We have not used the long answers in any of our experiments, but foresee interesting directions including studies into CoT prompting or studying answer rationales." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 512, + 187, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 187, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 187, + 525 + ], + "type": "text", + "content": "7 Limitations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 538, + 358, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 538, + 358, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 358, + 550 + ], + "type": "text", + "content": "In this last section, we discuss various limitations of our work." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 564, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 506, + 620 + ], + "type": "text", + "content": "Local relevance In our sourcing protocol, we explicitly sought to create questions locally relevant to the respective languages. It is important to notice, however, that some languages, such as English, Spanish, Portuguese, Chinese, French and to a lesser extent German and Dutch cover a wide variety of cultures. We did not separately control for that and the data for those languages thus likely comprises a mix of different locales." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "Data quality Building a bias-free evaluation datasets with few mistakes is not an easy feat. Even though we implemented several rounds of quality checks in our data collection pipeline, when looking at outputs we still incidentally found mistakes in the data or answers. We fixed some of these mistakes as we encountered them, but it is quite likely that more such mistakes occur in the dataset. 
It is also important to point out that we are less likely to spot such issues for languages that we do not understand at all, potentially creating a bias towards the set of languages for which we have a rudimentary understanding. Overall, however, we believe that the pipeline we designed assures a dataset of high quality. Of course, we welcome reports of mistakes spotted by others in the data." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 72, + 504, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 72, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 107, + 72, + 504, + 236 + ], + "type": "text", + "content": "Evaluation Because MultiLoKo is a generative benchmark, computing scores requires comparisons of a generated answer with a set of gold answers. A first obstacle to this method of evaluation is that it is hard to create an exhaustive list of correct short-form answers. This is especially true when the correct answer is not a number, date, title or something else that can be expressed only in a few ways. In addition to that, it is hard to incentivise LLMs to produce concise answers. Even when instructed to answer with only a number / date / name / title, they may respond with a full sentence, add a reasoning trail to their answer, or add words beyond the minimal answer in a different fashion. We addressed such issues that were systematic in post-processing (see Appendix B), but it is hard to a priori catch allthe ways that LLMs may deviate from the requested protocols. 
In some cases, we found additional post-processing steps that increased the scores of some models only later in the process, because scores for particular languages looked suspiciously low. For instance, we had not initially realised that our punctuation stripper did not strip punctuation in Urdu, which specifically influenced GPT4-o and Gemini. We considered several other metrics as well as judges, but eventually found that EM provided the clearest and least biased signal. It remains, however, a challenge to evaluate chatty LLMs completely independently from their ability to follow instructions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 255, + 504, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 255, + 504, + 385 + ], + "spans": [ + { + "bbox": [ + 107, + 255, + 504, + 385 + ], + "type": "text", + "content": "Wikipedia as information source MultiLoKo, as several other both multilingual as well as monolingual benchmarks, uses Wikipedia as main source of information. This has the advantage that Wikipedia has a large coverage across many different languages and the information is considered to be of high quality. It also facilitates comparable sourcing across languages. Of course, it also poses limitations. For one, it still provides a bias to the specific topics that can be included, that are usually primarily knowledge based. In fact, MultiLoKo is indeed a knowledge benchmark; it does not consider other types of skills. Secondly, and perhaps more importantly, Wikipedia is a corpus frequently used in the training data of LLMs. The fact that MultiLoKo is a challenging benchmark even given that (multilingual) wikipedia is likely included in the training data of most of the LLMs evaluated suggests that this is not a large issue at the moment. However, it is very possible that MultiLoKo can be 'hacked' relatively easily simply by strongly oversampling multilingual wikipedia data." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 409, + 206, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 409, + 206, + 423 + ], + "spans": [ + { + "bbox": [ + 107, + 409, + 206, + 423 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 437, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 437, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 107, + 437, + 504, + 525 + ], + "type": "text", + "content": "While this paper knows only two authors, this benchmark would not have been possible without the support and contributions of many people. We wish to thank all of them in this last section. First, we thank Van Phung, Kriz Chan, Antonio Gai, Dunant Hin and Emily Du for their support on facilitating and streamlining interactions with vendors for the data collection process, and Milena Hoffman for her indispensable administrative support in managing the data collection process. We would furthermore like to thank Van Phung and Kriz Chan for their continued help on ensuring data quality, saliency checking output, brainstorming and general support throughout the creation of the benchmark." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 529, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 529, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 107, + 529, + 504, + 673 + ], + "type": "text", + "content": "We also thank the linguists that helped us for their contributions to the analysis of the pilot questions in the benchmark, which played an important role in finetuning and improving our annotation protocol as well as disregard inappropriate questions, and for helping us design prompt templates to allow language-specific querying of models in different stages for each of the languages in MultiLoKo. 
Specifically, we would like to thank Abdul Haque (Urdu), Aleksandra Antokhina (Russian), Ananya Banerjee (Bengali), Firman Tahar (Indonesian), Florian Mouret (French), Francisco Paredes Maldonado (Spanish), Eriko Nakamura (Japanese), Julie Lee (Korean), Khanh Tien (Vietnamese), Miao Yeh (Traditional Chinese), Renata Barboza (Portuguese), Rishabh Goel (Hindi), Sanket Suhas Satope (Marathi), Sara Martellini (Italian) and Silvia Aponte (German). We thank Kriz Chan by streamlining our collaboration with these linguists, and Maria Paez Playa for offering her teams time on this enterprise. We furthermore thank Sabrina Qiao for providing resources for quick-turnaround QA support, and Ateeq Awan (English), Kaila Conley-Coversi (Italian), Semanti Roy (Bengali) and Shahmir Shaikh (English) for delivering this QA support." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 677, + 503, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 503, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 503, + 721 + ], + "type": "text", + "content": "As doing manual saliency checks is challenging for a multilingual benchmark, we also relied on the help of several colleagues to debug small issues, detect errors in questions and prompts and double check annotation judgements. We would like to thank Anna Prochowska, Daria Dudurca, Diego Perino, Etai Sella, Ivan John Piramide, Lovish Madaan, Yanir Kleiman for their help on this." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 742, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 742, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 742, + 310, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 91, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 106, + 91, + 507, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 91, + 507, + 180 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 507, + 180 + ], + "type": "text", + "content": "Fakhraddin Alwajih, Gagan Bhatia, and Muhammad Abdul-Mageed. Dallah: A dialect-aware multimodal large language model for Arabic. In Nizar Habash, Houda Bouamor, Ramy Eskander, Nadi Tomeh, Ibrahim Abu Farha, Ahmed Abdelali, Samia Touileb, Injy Hamed, Yaser Onaizan, Bashar Alhafni, Wissam Antoun, Salam Khalifa, Hatem Haddad, Imed Zitouni, Badr AlKhamissi, Rawan Almatham, and Khalil Mrini, editors, Proceedings of The Second Arabic Natural Language Processing Conference, pages 320-336, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.arabicnlp-1.27. URL https://aclanthology.org/2024.arabicnlp-1.27/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 189, + 507, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 189, + 507, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 189, + 507, + 211 + ], + "type": "text", + "content": "Anthropic. 
Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet, 2025. Accessed: 2025-04-11." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 220, + 507, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 220, + 507, + 277 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 507, + 277 + ], + "type": "text", + "content": "Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. On the cross-lingual transferability of monolingual representations. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4623–4637, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.421. URL https://aclanthology.org/2020.acl-main.421/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 286, + 507, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 286, + 507, + 365 + ], + "spans": [ + { + "bbox": [ + 106, + 286, + 507, + 365 + ], + "type": "text", + "content": "Lucas Bandarkar, Davis Liang, Benjamin Muller, Mikel Artetxe, Satya Narayan Shukla, Donald Husa, Naman Goyal, Abhinandan Krishnan, Luke Zettlemoyer, and Madian Khabsa. The belebele benchmark: a parallel reading comprehension dataset in 122 language variants. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 749-775, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.44. URL https://aclanthology.org/2024.acl-long.44/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 373, + 507, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 373, + 507, + 494 + ], + "spans": [ + { + "bbox": [ + 106, + 373, + 507, + 494 + ], + "type": "text", + "content": "Tom B. 
Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/1457c0d6bcbd4967418bf8ac142f64a-Abstract.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 504, + 507, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 504, + 507, + 538 + ], + "spans": [ + { + "bbox": [ + 106, + 504, + 507, + 538 + ], + "type": "text", + "content": "Pinzhen Chen, Simon Yu, Zhicheng Guo, and Barry Haddow. Is it good data for multilingual instruction tuning or just bad multilingual evaluation for large language models? CoRR, abs/2406.12822, 2024. URL https://doi.org/10.48550/arXiv.2406.12822." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 547, + 507, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 547, + 507, + 582 + ], + "spans": [ + { + "bbox": [ + 106, + 547, + 507, + 582 + ], + "type": "text", + "content": "Zhihong Chen, Shuo Yan, Juhao Liang, Feng Jiang, Xiangbo Wu, Fei Yu, Guiming Hardy Chen, Junying Chen, Hongbo Zhang, Li Jianquan, et al. Multilingualsift: Multilingual supervised instruction fine-tuning, 2023. URL https://arxiv.org/pdf/2412.15115." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 590, + 507, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 590, + 507, + 645 + ], + "spans": [ + { + "bbox": [ + 106, + 590, + 507, + 645 + ], + "type": "text", + "content": "Jonathan H. Clark, Eunsol Choi, Michael Collins, Dan Garrette, Tom Kwiatkowski, Vitaly Nikolaev, and Jennimaria Palomaki. TyDi QA: A benchmark for information-seeking question answering in typologically diverse languages. Transactions of the Association for Computational Linguistics, 8: 454-470, 2020. doi: 10.1162/tacl_a_00317. URL https://aclanthology.org/2020.tacl-1. 30/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 655, + 507, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 655, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 655, + 507, + 723 + ], + "type": "text", + "content": "Alexis Conneau, Rudy Rinott, Guillaume Lample, Adina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. XNLI: Evaluating cross-lingual sentence representations. In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2475–2485, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1269. URL https://aclanthology.org/D18-1269/." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 107, + 72, + 506, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 72, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 107, + 72, + 506, + 270 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Allonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. 
Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. The llama 3 herd of models. CoRR, abs/2407.21783, 2024. doi: 10.48550/ARXIV.2407.21783. URL https://doi.org/10.48550/arXiv.2407.21783." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 277, + 506, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 277, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 506, + 376 + ], + "type": "text", + "content": "Alena Fenogenova, Artem Chervyakov, Nikita Martynov, Anastasia Kozlova, Maria Tikhonova, Albina Akhmetgareeva, Anton Emelyanov, Denis Shevelev, Pavel Lebedev, Leonid Sinev, Ulyana Isaeva, Katerina Kolomeytseva, Daniil Moskovskiy, Elizaveta Goncharova, Nikita Savushkin, Polina Mikhailova, Anastasia Minaeva, Denis Dimitrov, Alexander Panchenko, and Sergey Markov. MERA: A comprehensive LLM evaluation in Russian. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9920–9948, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.534. URL https://aclanthology.org/2024.acl-long.534/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 384, + 506, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 506, + 417 + ], + "type": "text", + "content": "Google DeepMind. Google gemini ai update - December 2024. 
https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024. Accessed: 2025-04-11." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 425, + 505, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 425, + 505, + 480 + ], + "spans": [ + { + "bbox": [ + 107, + 425, + 505, + 480 + ], + "type": "text", + "content": "Naman Goyal, Cynthia Gao, Vishrav Chaudhary, Peng-Jen Chen, Guillaume Wenzek, Da Ju, Sanjana Krishnan, Marc'Aurelio Ranzato, Francisco Guzmán, and Angela Fan. The Flores-101 evaluation benchmark for low-resource and multilingual machine translation. Transactions of the Association for Computational Linguistics, 10:522-538, 2022. doi: 10.1162/tacl_a_00474. URL https://aclanthology.org/2022.tacl-1.30/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 487, + 506, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 487, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 107, + 487, + 506, + 564 + ], + "type": "text", + "content": "Momchil Hardalov, Todor Mihaylov, Dimitrina Zlatkova, Yoan Dinkov, Ivan Koychev, and Preslav Nakov. EXAMS: A multi-subject high school examinations dataset for cross-lingual and multilingual question answering. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5427-5444, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.438. URL https://aclanthology.org/2020.emnlp-main.438/." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 572, + 506, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 572, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 107, + 572, + 506, + 639 + ], + "type": "text", + "content": "Yun He, Di Jin, Chaoqi Wang, Chloe Bi, Karishma Mandyam, Hejia Zhang, Chen Zhu, Ning Li, Tengyu Xu, Hongjiang Lv, Shruti Bhosale, Chenguang Zhu, Karthik Abinav Sankararaman, Eryk Helenowski, Melanie Kambadur, Aditya Tayade, Hao Ma, Han Fang, and Sinong Wang. Multi-if: Benchmarking llms on multi-turn and multilingual instructions following. CoRR, abs/2410.15553, 2024. doi: 10.48550/ARXIV.2410.15553. URL https://doi.org/10.48550/arXiv.2410.15553." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 647, + 506, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 506, + 681 + ], + "type": "text", + "content": "Dieuwke Hupkes, Mario Giulianielli, Verna Dankers, Mikel Artetxe, Yanai Elazar, Tiago Pimentel, Christos Christodoulopoulos, Karim Lasri, Naomi Saphra, Arabella Sinclair, et al. A taxonomy and review of generalization research in nlp. Nature Machine Intelligence, 5(10):1161-1174, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 689, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 506, + 722 + ], + "type": "text", + "content": "Zhengbao Jiang, Antonios Anastasopoulos, Jun Araki, Haibo Ding, and Graham Neubig. X-FACTR: Multilingual factual knowledge retrieval from pretrained language models. 
In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 115, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 72, + 505, + 106 + ], + "type": "text", + "content": "Methods in Natural Language Processing (EMNLP), pages 5943-5959, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.479. URL https://aclanthology.org/2020.emnlp-main.479/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 114, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 114, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 107, + 114, + 505, + 148 + ], + "type": "text", + "content": "Jaap Jumelet, Leonie Weissweiler, and Arianna Bisazza. Multiblimp 1.0: A massively multilingual benchmark of linguistic minimal pairs. CoRR, abs/2504.02768, 2025. URL https://doi.org/10.48550/arXiv.2504.02768." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 156, + 506, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 156, + 506, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 506, + 221 + ], + "type": "text", + "content": "Nora Kassner, Philipp Duffer, and Hinrich Schütze. 
Multilingual LAMA: Investigating knowledge in multilingual pretrained language models. In Paola Merlo, Jorg Tiedemann, and Reut Tsarfaty, editors, Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 3250-3258, Online, April 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.eacl-main.284. URL https://aclanthology.org/2021.eacl-main.284/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 230, + 506, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 230, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 106, + 230, + 506, + 297 + ], + "type": "text", + "content": "Fajri Koto, Nurul Aisyah, Haonan Li, and Timothy Baldwin. Large language models only pass primary school exams in Indonesia: A comprehensive test on IndoMMLU. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12359-12374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.760. URL https://aclanthology.org/2023.emnlp-main.760/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 304, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 304, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 304, + 506, + 361 + ], + "type": "text", + "content": "Patrick Lewis, Barlas Oguz, Rudy Rinnott, Sebastian Riedel, and Holger Schwenk. MLQA: Evaluating cross-lingual extractive question answering. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault, editors, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7315–7330, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.653. URL https://aclanthology.org/2020.acl-main.653/." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 369, + 506, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 369, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 106, + 369, + 506, + 435 + ], + "type": "text", + "content": "Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. CMMLU: Measuring massive multitask language understanding in Chinese. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 11260–11285, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.671. URL https://aclanthology.org/2024.findings-acl.671/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 443, + 506, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 443, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 443, + 506, + 532 + ], + "type": "text", + "content": "Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, and Xian Li. Few-shot learning with multilingual generative language models. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 9019-9052, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.616. URL https://aclanthology.org/2022.emnlp-main.616/." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 539, + 506, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 539, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 506, + 573 + ], + "type": "text", + "content": "Lovish Madaan, Aaditya K Singh, Ryan Schaeffer, Andrew Poulton, Sanmi Koyejo, Pontus Stenetorp, Sharan Narang, and Dieuwke Hupkes. Quantifying variance in evaluation benchmarks. arXiv preprint arXiv:2406.10229, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 581, + 506, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 581, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 106, + 581, + 506, + 625 + ], + "type": "text", + "content": "Moran Mizrahi, Guy Kaplan, Dan Malkin, Rotem Dror, Dafna Shahaf, and Gabriel Stanovsky. State of what art? a call for multi-prompt LLM evaluation. Transactions of the Association for Computational Linguistics, 12:933-949, 2024. doi: 10.1162/tacl_a_00681. URL https://aclanthology.org/2024.tacl-1.52/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 633, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 633, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 633, + 506, + 723 + ], + "type": "text", + "content": "Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15991-16111, Toronto, Canada, July 2023. Association for Computational Linguistics. 
doi: 10.18653/v1/2023.acl-long.891. URL https://aclanthology.org/2023.acl-long.891/." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 138 + ], + "type": "text", + "content": "Xenia Ohmer, Elia Bruni, and Dieuwke Hupkes. Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses. In Sebastian Gehrmann, Alex Wang, João Sedoc, Elizabeth Clark, Kaustubh Dhole, Khyathi Raghavi Chandu, Enrico Santus, and Hoorman Sedghamiz, editors, Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM), pages 258-276, Singapore, December 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.gem-1.22/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 147, + 505, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 147, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 147, + 505, + 190 + ], + "type": "text", + "content": "Xenia Ohmer, Elia Bruni, and Dieuwke Hupke. From form(s) to meaning: Probing the semantic depths of language models using multisense consistency. Computational Linguistics, 50(4):1507-1556, 12 2024. ISSN 0891-2017. doi: 10.1162/coli_a_00529. URL https://doi.org/10.1162/coli_a_00529." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 199, + 504, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 199, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 504, + 220 + ], + "type": "text", + "content": "OpenAI. Mmmlu dataset. https://huggingface.co/datasets/openai/MMMLU, 2025. Accessed: 2025-04-11." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 231, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 505, + 722 + ], + "type": "text", + "content": "OpenAI,., Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Madry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alex Kirillov, Alex Nichol, Alex Paine, Alex Renzin, Alex Tachard Passos, Alexander Kirillov, Alex Christakis, Alexis Conneau, Ali Kamali, Allan Jabri, Allison Moyer, Allison Tam, Amadou Crookes, Amin Tootoochian, Amin Tootoonchian, Ananya Kumar, Andrea Vallone, Andrej Karpathy, Andrew Braunstein, Andrew Cann, Andrew Codispoti, Andrew Galu, Andrew Kondrich, Andrew Tulloch, Andrey Mishchenko, Angela Baek, Angela Jiang, Antoine Pelisse, Antonia Woodford, Anuj Gosalia, Arka Dhar, Ashley Pantuliano, Avi Nayak, Avital Oliver, Barret Zoph, Behrooz Ghorbani, Ben Leimberger, Ben Rossen, Ben Sokolowsky, Ben Wang, Benjamin Zweig, Beth Hoover, Blake Samic, Bob McGrew, Bobby Spero, Bogo Giertler, Bowen Cheng, Brad Lightcap, Brandon Walkin, Brendan Quinn, Brian Guarraci, Brian Hsu, Bright Kellogg, Brydon Eastman, Camillo Lugaresi, Carroll Wainwright, Cary Bassin, Cary Hudson, Casey Chu, Chad Nelson, Chak Li, Chan Jun Shern, Channing Conger, Charlotte Barette, Chelsea Voss, Chen Ding, Cheng Lu, Chong Zhang, Chris Beaumont, Chris Hallacy, Chris Koch, Christian Gibson, Christina Kim, Christine Choi, Christine 
McLeavey, Christopher Hesse, Claudia Fischer, Clemens Winter, Coley Czarnecki, Colin Jarvis, Colin Wei, Constantin Koumouzelis, Dane Sherburn, Daniel Kappler, Daniel Levin, Daniel Levy, David Carr, David Farhi, David Mely, David Robinson, David Sasaki, Kenny Jin, Dev Valladares, Dimitris Tsipras, Doug Li, Duc Phong Nguyen, Duncan Findlay Edede Oiwoh, Edmund Wong Ehsan Asdar Elizabeth Proehl Elizabeth Yang Eric Antonow Eric Kramer Eric Peterson Eric Sigler Eric Wallace Eugene Brevdo Evan Mays Farzad Khorasani Felipe Petroski Such Filippo Raso Francis Zhang Fred von Lohmann Freddie Sult Gabriel Goh Gene Oden Geoff Salmon Giulio Starace Greg Brockman Hadi Salman Haiming Bao Haitang Hu Hannah Wong Haoyu Wang Heather Schmidt Heather Whitney Heewoo Jun Hendrik Kirchner Henrique Ponde de Oliveira Pinto Hongyu Ren Huiwen Chang Hyung Won Chung Ian Kivlichan Ian O'Connell Ian O'Connell Ian Osband Ian Silber Ian Sohl Ibrahim Okuyucu Ikai Lan Ilya Kostrikov Ilya Sutskever Ingmar Kanitscheider Ishaan Gulrajani Jacob Coxon Jacob Menick Jakub Pachocki James Aung James Betker James Crooks James Lennon Jamie Kiros Jan Leike Jane Park Jason Kwon Jason Phang Jason Teplitz Jason Wei Jason Wolfe Jay Chen Jeff Harris Jenia Varavva Jessica Gan Lee Jessica Shieh Ji Lin Jiahui Yu Jiayi Weng Jie Tang Jieqi Yu Joanne Jang Joaquin Quinonero Candela Joe Beutler Joe Landers Joel Parish Johannes Heidecke John Schulman Jonathan Lachman Jonathan McKay Jonathan Uesato Jonathan Ward Jong Wook Kim Joost Huizinga Jordan Sitkin Jos Kraaijeveld Josh Gross Josh Kaplan Josh Snyder Joshua Achiam Joy Jiao Joyce Lee Juntang Zhuang Justyn Harriman Kai Fricke Kai Hayashi Karan Singhal Katy Shi Kevin Karthik Kayla Wood Kendra Rimbach Kenny Hsu Kenny Nguyen Keren Gu-Lemberg Kevin Button Kevin Liu Kiel Howe Krithika Muthukumar Kyle Luther Lama Ahmad Larry Kai Lauren Itow Lauren Workman Leher Pathak Leo Chen Li Jing Lia Guy Liam Fedus Liang Zhou Lien Mamitsuka Lilian Weng Lindsay McCallum Lindsey Held Long Ouyang 
Louis Feuvrier Lu Zhang Lukas Kondraciuk Lukasz Kaiser Luke Hewitt Luke Metz Lyric Doshi Mada Aflak Maddie Simens Madelaine Boyd Madeleine Thompson Marat Dukhan Mark Chen Mark Gray Mark Hudnall Marvin Zhang Marwan Aljubeh Mateusz Litwin Matthew Zeng Max Johnson Maya Shetty Mayank Gupta Meghan Shah Mehmet Yatbaz Meng Jia Yang Mengchao Zhong Mia Glaese Mianna Chen Michael Janner Michael Lampe Michael Petrov Michael Wu Michele Wang Michelle Fradin Michelle Pokrass Miguel Castro Miguel Oom Temudo de Castro Mikhail Pavlov Miles" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 115, + 72, + 506, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 72, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 115, + 72, + 506, + 323 + ], + "type": "text", + "content": "Brundage, Miles Wang, Minal Khan, Mira Murati, Mo Bavarian, Molly Lin, Murat Yesildal, Nacho Soto, Natalia Gimelshein, Natalie Cone, Natalie Staudacher, Natalie Summers, Natan LaFontaine, Neil Chowdhury, Nick Ryder, Nick Stathas, Nick Turley, Nik Tezak, Nik Felix, Nithanth Kudige, Nitish Keskar, Noah Deutsch, Noel Bundick, Nora Puckett, Ofir Nachum, Ola Okelola, Oleg Boiko, Oleg Murk, Oliver Jaffe, Olivia Watkins, Olivier Godement, Owen Campbell-Moore, Patrick Chao, Paul McMillan, Pavel Belov, Peng Su, Peter Bak, Peter Bakkum, Peter Deng, Peter Dolan, Peter Hoeschele, Peter Welinder, Phil Tillet, Philip Pronin, Philippe Tillet, Prafulla Dhariwal, Qiming Yuan, 
Rachel Dias, Rachel Lim, Rahul Arora, Rajan Troll, Randall Lin, Rapha Gontijo Lopes, Raul Puri, Reah Miyara, Reimar Leike, Renaud Gaubert, Reza Zamani, Ricky Wang, Rob Donnelly, Rob Honsby, Rocky Smith, Rohan Sahai, Rohit Ramchandani, Romain Huet, Rory Carmichael, Rowan Zellers, Roy Chen, Ruby Chen, Ruslan Nigmatullin, Ryan Cheu, Saachi Jain, Sam Altman, Sam Schoenholz, Sam Toizer, Samuel Miserendino, Sandhini Agarwal, Sara Culver, Scott Ethersmith, Scott Gray, Sean Grove, Sean Metzger, Shamez Hermani, Shantanu Jain, Shengjia Zhao, Sherwin Wu, Shino Jomoto, Shirong Wu, Shuaiqi, Xia, Sonia Phene, Spencer Papay, Srinivas Narayanan, Steve Coffey, Steve Lee, Stewart Hall, Suchir Balaji Tal Broda Tal Stramer, Tao Xu, Tarun Gogineni, Taya Christianson, Ted Sanders, Tejal Patwardhan, Thomas Cunninghamman, Thomas Degry, Thomas Dimson, Thomas Raoux, Thomas Shadwell, Tianhao Zheng Todd Underwood,Todor Markov,Toki Sherbakov,Tom Rubin Tom Stasi Tomer Kaftan. Tristan Heywood,Troy Peterson,Tyce Walters,Tyna Eloundou,V Valerie Qi,Veit Moeller,Vinnie Monaco,Vishal Kuo,Vlad Fomenko,Wayne ChangWeiyi ZhengWenda ZhouWesam Manassra Will Sheu Wojciech Zaremba,Yash Patil Yilei Qian Yongjik Kim Youlong ChengYu Zhang. Yuchen He,Yuchen Zhang,Yujia Jin,Yunxing Dai,and Yury Malkov.Gpt-4o system card2024. URL https://arxiv.org/abs/2410.21276." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 329, + 506, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 329, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 106, + 329, + 506, + 384 + ], + "type": "text", + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Pierre Isabelle, Eugene Charniak, and Dekang Lin, editors, Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA, July 2002. Association for Computational Linguistics. 
doi: 10.3115/1073083.1073135. URL https://aclanthology.org/P02-1040/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 390, + 506, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 390, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 506, + 456 + ], + "type": "text", + "content": "Edoardo Maria Ponti, Goran Glavaš, Olga Majewska, Qianchu Liu, Ivan Vulić, and Anna Korhonen. XCOPA: A multilingual dataset for causal commonsense reasoning. In Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu, editors, Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2362-2376, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.185. URL https://aclanthology.org/2020.emnlp-main.185/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 462, + 506, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 462, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 106, + 462, + 506, + 518 + ], + "type": "text", + "content": "Maja Popovic. chrF: character n-gram F-score for automatic MT evaluation. In Ondrej Bojar, Rajan Chatterjee, Christian Federmann, Barry Haddow, Chris Hokamp, Matthias Huck, Varvara Logacheva, and Pavel Pecina, editors, Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395, Lisbon, Portugal, September 2015. Association for Computational Linguistics. doi: 10.18653/v1/W15-3049. URL https://aclanthology.org/W15-3049/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 523, + 506, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 523, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 506, + 578 + ], + "type": "text", + "content": "Jirui Qi, Raquel Fernández, and Arianna Bisazza. Cross-lingual consistency of factual knowledge in multilingual language models. 
In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10650-10666, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.658. URL https://aclanthology.org/2023.emnlp-main.658/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 584, + 506, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 584, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 106, + 584, + 506, + 661 + ], + "type": "text", + "content": "Qwen,.; An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 667, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 667, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 506, + 722 + ], + "type": "text", + "content": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Jian Su, Kevin Duh, and Xavier Carreras, editors, Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392, Austin, Texas, November 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264/." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 107, + 72, + 506, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 72, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 107, + 72, + 506, + 214 + ], + "type": "text", + "content": "Angelika Romanou, Negar Foroutan, Anna Sotnikova, Zeming Chen, Sree Harsha Nelaturu, Shivalika Singh, Rishabh Maheshwary, Micol Altomare, Mohamed A. Haggag, Snegha A, Alfonso Amayuelas, Azril Hafizi Amirudin, Viraat Aryabumi, Danylo Boiko, Michael Chang, Jenny Chim, Gal Cohen, Aditya Kumar Dalmia, Abraham Diress, Sharad Duwal, Daniil Dzenhaliou, Daniel Fernando Erazo Florez, Fabian Farestam, Joseph Marvin Imperial, Shayekh Bin Islam, Perttu Isotalo, Maral Jabbarishiviari, Borje F. Karlsson, Eldar Khalilov, Christopher Klamm, Fajri Koto, Dominik Krzeminski, Gabriel Adriano de Melo, Syrielle Montariol, Yiyang Nan, Joel Niklaus, Jekaterina Novikova, Johan Samir Obando Ceron, Debjit Paul, Esther Ploeger, Jebish Purbey, Swati Rajwal, Selvan Sunitha Ravi, Sara Rydell, Roshan Santhosh, Drishti Sharma, Marjana Prifti Skenduli, Arshia Soltani Moakhar, Bardia Soltani Moakhar, Ran Tamir, Ayush Kumar Tarun, Azmine Toushik Wasi, Thenuka Ovin Weerasinghe, Serhan Yilmaz, Mike Zhang, Imanol Schlag, Marzieh Fadaee, Sara Hooker, and Antoine Bosselut. INCLUDE: evaluating multilingual language understanding with regional knowledge, 2024. URL https://doi.org/10.48550/arXiv.2411.19799." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 224, + 506, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 506, + 267 + ], + "type": "text", + "content": "Eduardo Sánchez, Belen Alastruey, Christophe Ropers, Pontus Stenetorp, Mikel Artetxe, and Marta R. Costa-jussà. Linguini: A benchmark for language-agnostic linguistic reasoning. CoRR, abs/2409.12126, 2024. doi: 10.48550/ARXIV.2409.12126. URL https://doi.org/10.48550/arXiv.2409.12126." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 277, + 506, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 506, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 506, + 366 + ], + "type": "text", + "content": "Priyanka Sen, Alham Fikri Aji, and Amir Saffari. Mintaka: A complex, natural, and multilingual dataset for end-to-end question answering. In Nicoletta Calzolari, Chu-Ren Huang, Hansaem Kim, James Pustejovsky, Leo Wanner, Key-Sun Choi, Pum-Mo Ryu, Hsin-Hsi Chen, Lucia Donatelli, Heng Ji, Sadao Kurohashi, Patrizia Paggio, Nianwen Xue, Seokhwan Kim, Younggyun Hahm, Zhong He, Tony Kyungil Lee, Enrico Santus, Francis Bond, and Seung-Hoon Na, editors, Proceedings of the 29th International Conference on Computational Linguistics, pages 1604-1619, Gyeongju, Republic of Korea, October 2022. International Committee on Computational Linguistics. URL https://aclanthology.org/2022.coling-1.138/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 375, + 506, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 375, + 506, + 431 + ], + "spans": [ + { + "bbox": [ + 105, + 375, + 506, + 431 + ], + "type": "text", + "content": "Sheikh Shafayat, H Hasan, Minhajur Mahim, Rifki Putri, James Thorne, and Alice Oh. BEnQA: A question answering benchmark for Bengali and English. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1158-1177, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10. 18653/v1/2024-findings-acl.68. URL https://aclanthology.org/2024-findings-acl.68/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 440, + 506, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 506, + 495 + ], + "type": "text", + "content": "Freda Shi, Mirac Suzgun, Markus Freitag, Xuezhi Wang, Suraj Srivats, Soroush Vosoughi, Hyung Won Chung, Yi Tay, Sebastian Ruder, Denny Zhou, Dipanjan Das, and Jason Wei. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 505, + 506, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 505, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 506, + 582 + ], + "type": "text", + "content": "Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David Ifeoluwa Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, Enzo Ferrante, Marzieh Fadaee, Beyza Ermis, and Sara Hooker. Global MMLU: understanding and addressing cultural and linguistic biases in multilingual evaluation. CoRR, abs/2412.03304, 2024. doi: 10.48550/ARXIV.2412.03304. URL https://doi.org/10.48550/arXiv.2412.03304." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 592, + 504, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 504, + 613 + ], + "type": "text", + "content": "Mistral AI team. Cheaper, better, faster, stronger, 2024. URL https://mistral.ai/news/mixtral-8x22b. Accessed: 4-Apr-2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 624, + 504, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 504, + 657 + ], + "type": "text", + "content": "Aman Singh Thakur, Kartik Choudhary, Venkat Srinik Ramayapally, Sankaran Vaidyanathan, and Dieuwke Hupkes. Judging the judges: Evaluating alignment and vulnerabilities in Ilms-as-judges. CoRR, abs/2406.12624, 2024. URL https://doi.org/10.48550/arXiv.2406.12624." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 666, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 506, + 722 + ], + "type": "text", + "content": "Lucas Weber, Elia Bruni, and Dieuwke Hupkes. Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning. In Jing Jiang, David Reitter, and Shumin Deng, editors, Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pages 294-313, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.conll-1.20. URL https://aclanthology.org/2023.conll-1.20/." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 285 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 128 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 128 + ], + "type": "text", + "content": "Weihao Xuan, Rui Yang, Heli Qi, Qingcheng Zeng, Yunze Xiao, Yun Xing, Junjue Wang, Huitao Li, Xin Li, Kunyu Yu, Nan Liu, Qingyu Chen, Douglas Teodoro, Edison Marrese-Taylor, Shijian Lu, Yusuke Iwasawa, Yutaka Matsuo, and Irene Li. Mmlu-prox: A multilingual benchmark for advanced large language model evaluation. CoRR, abs/2503.10497, 2025. URL https://doi.org/10.48550/arXiv.2503.10497." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 134, + 506, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 134, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 134, + 506, + 213 + ], + "type": "text", + "content": "Wenxuan Zhang, Mahani Aljunied, Chang Gao, Yew Ken Chia, and Lidong Bing. M3exam: A multilingual, multimodal, multilevel benchmark for examining large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine, editors, Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 
URL http://papers.nips.cc/paper_files/paper/2023/bitstream/117c5c8622b0d539f74f6d1fb082a2e9-Abstract-Datasets_and_Benchmarks.html." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 219, + 506, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 219, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 219, + 506, + 285 + ], + "type": "text", + "content": "Yuan Zhang, Jason Baldridge, and Luheng He. PAWS: Paraphrase adversaries from word scrambling. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1298-1308, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1131. URL https://aclanthology.org/N19-1131/." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 270, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 270, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 270, + 83 + ], + "type": "text", + "content": "A Additional dataset statistics" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 96, + 451, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 96, + 451, + 109 + ], + "spans": [ + { + "bbox": [ + 105, + 96, + 451, + 109 + ], + "type": "text", + "content": "For reference, we provide a few dataset statistics beyond the main results in the paper." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 127, + 116, + 481, + 251 + ], + "blocks": [ + { + "bbox": [ + 127, + 116, + 481, + 251 + ], + "lines": [ + { + "bbox": [ + 127, + 116, + 481, + 251 + ], + "spans": [ + { + "bbox": [ + 127, + 116, + 481, + 251 + ], + "type": "image", + "image_path": "0ee44851e6330b28bb51edddb359d3b46ffd35f998c59f2c4c70256478702034.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "type": "text", + "content": "Figure 8: Distribution of output types on the dev split. We show the normalised distribution of correct output types across languages, ordered (from bottom to top) by average frequency. Rare output types that occur only a few times are mapped to the category 'other'." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 306, + 504, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 372 + ], + "type": "text", + "content": "Output type distribution In Figure 8, we show the per-language distribution of output types for MultiLoKo dev split.10 We mapped very rare output types, such as 'a quantity', 'a period of time' or 'letter' to 'other', for plotting purposes. We can see that name is the most common output type across languages, followed by the generic output type a word and number. Also place and date are relatively common output types, whereas most other output types occur very infrequently or only for a handful of languages." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 136, + 385, + 473, + 612 + ], + "blocks": [ + { + "bbox": [ + 136, + 385, + 473, + 612 + ], + "lines": [ + { + "bbox": [ + 136, + 385, + 473, + 612 + ], + "spans": [ + { + "bbox": [ + 136, + 385, + 473, + 612 + ], + "type": "image", + "image_path": "bd414948b1fc64eac369d890930e1fd6fdc455be1d18c2df3bdd0cd54887fd29.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "type": "text", + "content": "Figure 9: Average question and answer lengths. We show the per-question average length (in words) of the locally-sourced questions and answers, human-translated into English." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 655, + 504, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 700 + ], + "type": "text", + "content": "Input and output length In addition to that, we show the average question – and output lengths of human-translated the locally sourced questions to English in Figure 9. While there is some variation in particular in question length, the lengths of the answers are relatively consistent. The average answer length is around 2, combining one-word answers with (usually) longer names." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 115, + 710, + 424, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 710, + 424, + 722 + ], + "spans": [ + { + "bbox": [ + 115, + 710, + 424, + 722 + ], + "type": "text", + "content": "10Because the test split is blind, we do not report the distribution of output types here." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 236, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 236, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 236, + 85 + ], + "type": "text", + "content": "B Instruction following" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 96, + 506, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 96, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 96, + 506, + 152 + ], + "type": "text", + "content": "To facilitate evaluation, we instruct models to answer question with only a number/place/etc. Overall, we found that base models (with a five-shot template) are much better at abiding by this instruction than chat models, which exhibit a number of pathologies. While some of those can be caught with appropriate post-processing (see Appendix C, this is not the case for all issues. Below, we provide a summary of the main instruction-following issues we encountered with chat models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "text", + "content": "False refusals Sometimes chat models refuse to provide an answer when the question is falsely perceived to be inappropriate (e.g. when the question asks about someone aged younger than 18)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 190, + 506, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 190, + 506, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 506, + 257 + ], + "type": "text", + "content": "Producing full sentences Another issue we observed is that chat models would provide a full sentence answer, rather than a single word or phrase (e.g. Which year was Francisco Franco born? Produce a year only. - Francisco Franco was born in 1936). Such full-sentence answers make exact match rating impossible. The effect is not consistent across languages and happens only for some of the examples, without any discernable pattern, and therefore difficult to address completely with post-processing.[11]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 264, + 505, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 264, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 505, + 309 + ], + "type": "text", + "content": "Spurious addition of \"answer is\" Likely due to overtraining on MMLU style tasks, Models such as OpenAI's GPT4 and Gemini 2.0 preface the vast majority of the answers in English with \"answer is\" or \"X answer is X\" where X is the desired correct response. This is remarkable, because it is essentially a repetition of the end of the prompt. However, it is easy to fix in post-processing." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 316, + 506, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 351 + ], + "type": "text", + "content": "Japanese specific issues In Japanese, in general it is not polite to answer with incomplete sentences. As such chat models often append the copula verb \"desu\" to the answer, making exact match unsuccessful. We are able to fix this in postprocessing." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 357, + 505, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 505, + 425 + ], + "type": "text", + "content": "Claude 3.5 Sonnet issues We were unable to make Claude 3.5 Sonnet follow the instructions to produce just an answer in English. It seemed to engage in a long chain-of-thought reasoning style response which we were unable to reliably parse. This issue only manifests in English and only with Claude. For this reason, we exclude Claude 3.5 Sonnet from our knowledge transfer results, as it would make the average lack of knowledge transfer from non-English languages to English more severe than they are." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 441, + 246, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 246, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 246, + 455 + ], + "type": "text", + "content": "C Post-processing details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 467, + 504, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 467, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 504, + 490 + ], + "type": "text", + "content": "We perform the following post-processing for both the reference answers and the answers produced by the model:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 499, + 310, + 544 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 132, + 499, + 310, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 499, + 310, + 511 + ], + "spans": [ + { + "bbox": [ + 132, + 499, + 310, + 511 + ], + "type": "text", + "content": "- Remove leading and trailing whitespaces." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 516, + 229, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 516, + 229, + 528 + ], + "spans": [ + { + "bbox": [ + 132, + 516, + 229, + 528 + ], + "type": "text", + "content": "- Remove punctuation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 533, + 234, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 533, + 234, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 533, + 234, + 544 + ], + "type": "text", + "content": "- Lowercase everything." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 554, + 410, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 554, + 410, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 554, + 410, + 566 + ], + "type": "text", + "content": "We perform the following additional post-processing for pretrained models:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 575, + 471, + 603 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 132, + 575, + 471, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 575, + 471, + 587 + ], + "spans": [ + { + "bbox": [ + 132, + 575, + 471, + 587 + ], + "type": "text", + "content": "- Remove leading \"Answer:\" or \"A:\" or the non-English equivalent from the output." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 592, + 311, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 592, + 311, + 603 + ], + "spans": [ + { + "bbox": [ + 132, + 592, + 311, + 603 + ], + "type": "text", + "content": "- Remove everything after the first newline." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 613, + 411, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 411, + 625 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 411, + 625 + ], + "type": "text", + "content": "We perform the following additional post-processing for postrained models:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 635, + 504, + 692 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 132, + 635, + 260, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 635, + 260, + 647 + ], + "spans": [ + { + "bbox": [ + 132, + 635, + 260, + 647 + ], + "type": "text", + "content": "- Remove leading \"answer is:\"" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 652, + 504, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 652, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 132, + 652, + 504, + 675 + ], + "type": "text", + "content": "- Detect the pattern \"X answer is X\", where X is the desired answer, and strip the unnecessary part in the middle." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 680, + 290, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 680, + 290, + 692 + ], + "spans": [ + { + "bbox": [ + 132, + 680, + 290, + 692 + ], + "type": "text", + "content": "- Remove training \"desu\" in Japanese." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "text", + "content": "11Using a judge-LLM may to some extent address this problem, but at the expense of other issues (e.g. Thakur et al., 2024)." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 250, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 250, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 250, + 83 + ], + "type": "text", + "content": "D Annotation instructions" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 96, + 506, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 96, + 506, + 131 + ], + "spans": [ + { + "bbox": [ + 104, + 96, + 506, + 131 + ], + "type": "text", + "content": "Our annotation pipeline contains five stages: 1) locality rating, 2) question generation 3) question review, 4) question answering, and 5) translation. Below, we provide the annotation instructions for each of these stages." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 196, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 196, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 196, + 156 + ], + "type": "text", + "content": "D.1 Locality rating" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 164, + 506, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 506, + 253 + ], + "type": "text", + "content": "To narrow-down the initial selection of paragraphs – sampled from the top-rated Wikipedia pages of the respective locales – the first step in our annotation pipeline is locality rating. 
Given a paragraph, we ask annotators to rate whether the paragraph is locally relevant to the particular locale, on a likertscale from 1 to 5, where 1 refers to extremely local and relatively obscure topics very specifically related to the specific language or locale and with little international recognition and 5 to globally well-known topics. We also ask annotators to disregard pages about inappropriate or politically sensitive topics. The rubric for locality annotation can be found in Table 3. We disregard everything with a locality rating of 3 or lower." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 106, + 264, + 504, + 592 + ], + "blocks": [ + { + "bbox": [ + 106, + 264, + 504, + 592 + ], + "lines": [ + { + "bbox": [ + 106, + 264, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 106, + 264, + 504, + 592 + ], + "type": "table", + "html": "
DescriptionExample
1.Extremely local and relatively obscure. Content that is of interest only to a small, localized group, such as a specific town, region, or community. These topics are typically obscure and not widely known beyond their immediate area.Local radio stations, small town historical events, regional businesses, or niche local cultural practices.
2.Regional interest. Topics that have some relevance beyond a specific locality but are still primarily of interest within a particular region or country.State or provincial politicians, regional cuisine, local sports teams, or medium-sized companies with regional influence.
3.National Significance. Content that is widely recognized within a single country, but relatively un-known internationally.National politicians (not internationally known), popular national media figures, major corporations within a country, or significant national historical events.
4.International recognition. Topics that are recognized and have relevance in multiple countries but may not be universally known across the globe. These topics often have international influence and are likely to be covered in international media, though their impact may vary by region.International brands which may be recognized in more than one country, celebrities with some international reach, significant cultural movements, or political conflicts with some awareness on the international stage.
5.Global prominence. Content that is widely recognized and relevant across a large number of countries around the world. These topics have a global impact or appeal and are likely to be well-represented in media across diverse cultures and regions.Globally famous celebrities (e.g., Cristiano Ronaldo), multinational corporations (e.g., Apple), major world events, or universally recognized cultural icons.
", + "image_path": "fa2dfae75eef0a7bb838f0dfc25bd66e8d7acffe982633b0c359ac0cb43632f3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "type": "text", + "content": "Table 3: Rubric for locality rating task. In the locality rating task, we ask the annotators to rate paragraphs with respect to how locally relevant the topic is to the locale." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 647, + 219, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 219, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 219, + 658 + ], + "type": "text", + "content": "D.2 Question generation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "content": "The second and main annotation step in our pipeline is the step in which we ask annotators to generate questions about sampled paragraphs. We ask annotators to generate a challenging question with a short answer. The answer should be easy to evaluate with string-matching metrics, the questions should not be open-ended or have many possible correct answers, be ambiguous or subjective, and the expected short answer should be concise. 
To ensure difficulty, we ask that answering the question" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "type": "text", + "content": "requires combining information from different parts in the accompanying text; It should not be answerable by mere regurgitation of a single sentence. We furthermore ask that the question is formulated such that its answer will not change over time (e.g. not 'How many medals has Sifan Hassan won', but 'How many medals has Sifan Hassan won between 2018 and 2022 (including)'), and that the question is answerable also without the article (e.g. not 'How many tv shows did the person in this article produce?'). To facilitate validation checks in the next round, we also ask that the question authors write a longer answer to explain how they arrived at the short answer. We also ask the question authors to annotate what is the type of the correct answer (e.g. number, name, date, etc) In the pilot, we observed that – for some languages – the vast majority of questions were questions that required some form of numerical reasoning. Because the intention of the benchmark is to address knowledge more than reasoning, we afterwards restricted the number of numerical questions to " + }, + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "type": "text", + "content": ". 
Similarly, we asked question authors to avoid yes/no questions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 217, + 202, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 202, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 202, + 228 + ], + "type": "text", + "content": "D.3 Question review" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "type": "text", + "content": "In the first round of question review, we asked annotators from a different provider to judge whether the questions abide by the rules provided to the question authors. All question reviewers are native speakers. Specifically, we ask them to check if:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 278, + 504, + 388 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 132, + 278, + 334, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 278, + 334, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 278, + 334, + 289 + ], + "type": "text", + "content": "- The question pertains to a locally relevant topic" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 293, + 380, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 380, + 304 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 380, + 304 + ], + "type": "text", + "content": "- The question is clear and understandable, and not subjective" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 308, + 321, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 308, + 321, + 319 + ], + "spans": [ + { + "bbox": [ + 132, + 308, + 321, + 319 + ], + "type": "text", + "content": "- The question has a clear and concise answer" + } + ] + } + ], + "index": 5 + }, + { + 
"bbox": [ + 132, + 322, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 322, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 132, + 322, + 504, + 344 + ], + "type": "text", + "content": "- If there are multiple possible variations of the answer possible (e.g. 'Dick Schoof' / 'Minister Dick Schoof' / 'Prime Minister Dick Schoof' / etc), all versions of the answer are provided." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 348, + 351, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 348, + 351, + 359 + ], + "spans": [ + { + "bbox": [ + 132, + 348, + 351, + 359 + ], + "type": "text", + "content": "- The question and answer are in the correct language" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 362, + 343, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 362, + 343, + 373 + ], + "spans": [ + { + "bbox": [ + 132, + 362, + 343, + 373 + ], + "type": "text", + "content": "- The question is understandable without the article" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 376, + 429, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 376, + 429, + 388 + ], + "spans": [ + { + "bbox": [ + 132, + 376, + 429, + 388 + ], + "type": "text", + "content": "- That the answer to the question will not likely change in the near future" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 397, + 504, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 463 + ], + "type": "text", + "content": "When a question can be fixed with a minor change (e.g. add a time indication to make sure an answer will not change in the near future, or add an extra answer version), we ask the question reviewers to implement this fix and describe it. 
In the pilot round, we use the annotator feedback to finetune our annotation protocol and provide feedback to the question-authors. During the rest of the data collection, we simply disregard questions that are not useable as is or can be corrected with minor changes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 475, + 298, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 298, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 298, + 488 + ], + "type": "text", + "content": "D.4 Validation through question answering" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 496, + 506, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 506, + 617 + ], + "type": "text", + "content": "In the last stage of our question generation pipeline, we have additional annotators answer the sourced and reviewed question. The goal of this validation task is to confirm that the questions are answerable, correct, non-ambiguous when read by individuals other than the original question author, and that all possible versions of the answers are included. For each question, we ask two additional annotators to first answer the question, using the snippets the questions were sourced from for context. After they have answered the question, they are shown the list of reference answers written by the original author of the question as well as the rational they provided, and we ask them to reflect upon the answer they gave themselves. If their answer did not match any answer in the original reference list, we ask them to either add their answer to the list if it is semantically equivalent to their own answer or indicate which answer they believe to be correct, their own or the original answer. We disregard all questions where at least one annotator disagrees with the original question author." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 632, + 197, + 644 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 197, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 197, + 644 + ], + "type": "text", + "content": "E Related work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 656, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 723 + ], + "type": "text", + "content": "In this paper, we introduce a new multilingual benchmark for LLMs, that we believe addresses gaps and pitfalls in existing benchmarks. We (concisely) outlined those gaps and pitfalls and mentioned several other works related to ours in the introduction of those paper. Here, we discuss multilingual evaluation of LLMs in more detail. Specifically, we discuss what datasets recent LLM releases have used for multilingual evaluation (Appendix E.1) and what other datasets and approaches they could have used but did not (Appendix E.2)." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 70, + 504, + 218 + ], + "blocks": [ + { + "bbox": [ + 108, + 70, + 504, + 218 + ], + "lines": [ + { + "bbox": [ + 108, + 70, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 108, + 70, + 504, + 218 + ], + "type": "table", + "html": "
Claude 3.5 SonnetMGSM (Shi et al., 2023)
Gemini 2.0 FlashMentions multilingual audio, no multilingual benchmarks scores reported.
GPT4-oARC-Easy and TruthfulQA translated into five African languages (internal benchmark), Uhura-Eval (internal benchmark).
Llama 3.1MGSM (Shi et al., 2023), Multilingual MMLU (internal benchmark)
Mixtral 8x22Btranslated ARC-C, HellaSwag and MMLU (internal benchmarks)
Qwen2.5 72BM3Exam (Zhang et al., 2023), IndoMMLU (Koto et al., 2023), ruMMLU (Fenogenova et al., 2024), translated MMLU (Chen et al., 2023), Belebele (Bandarkar et al., 2024), XCOPA (Ponti et al., 2020), XWinograd (Muennighoff et al., 2023), XStoryClose (Lin et al., 2022), PAWS-X (Zhang et al., 2019), MGSM (Shi et al., 2023), Flores-101 (Goyal et al., 2022)
", + "image_path": "6097bbba21f178dc4be15a6d96ad58e0b1c3c7cc17e299a76a31147c2d105aca.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 221, + 504, + 257 + ], + "lines": [ + { + "bbox": [ + 104, + 221, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 504, + 257 + ], + "type": "text", + "content": "Table 4: Multilingual evaluation of recent LLM releases, overview. We provide an overview table of the benchmark for which scores are reported in the release papers or notes of the LLMs we evaluated in this paper. Models are sorted alphabetically." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 284, + 320, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 284, + 320, + 296 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 320, + 296 + ], + "type": "text", + "content": "E.1 Multilingual evaluation of LLMs in practice" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 304, + 504, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 403 + ], + "type": "text", + "content": "While multilinguality is something frequently mentioned in the release papers or posts of recent LLM releases, the datasets for which they actual report scores is in most cases quite limited. Of the models that we evaluated for this paper, Gemini 2.0 Flash reported no multilingual scores at all; GPT4-o and Mixtral 8x22B report scores only on internally translated but not publicly available English benchmarks; Claude 3.5 Sonnet reports scores for only one benchmark - MGSM. MGSM is also the only publicly available benchmark for which Llama 3.1 reports scores, along with - also - an internally translated version of MMLU that is not publicly available. 
The only model that extensively reports multilingual benchmark values, on more than 10 benchmarks, is Qwen2.5 72B. We provide an overview of the multilingual benchmarks for which scores are reported for these models in Table 4." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 415, + 309, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 309, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 309, + 427 + ], + "type": "text", + "content": "E.2 Multilingual evaluation options for LLMs" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 435, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 506, + 469 + ], + "type": "text", + "content": "While, as we discuss below, there are gaps and challenges with multilingual evaluation for LLMs, there are in fact many more options than is suggested by what is reported in recent releases. Below, we discuss other options for multilingual LLM evaluation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 479, + 506, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 506, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 506, + 677 + ], + "type": "text", + "content": "Translated English benchmarks As mentioned earlier on, benchmarks used for LLM evaluation are often translated English benchmarks. In some cases, the benchmarks were designed to evaluate only English and translated later, such as translated MMLU (e.g. Li et al., 2024; Chen et al., 2023; OpenAI, 2025; Singh et al., 2024) or MMLU-ProX (Xuan et al., 2025), MGSM (Shi et al., 2023) or MLAMA (Kassner et al., 2021). In other cases, the benchmark was multilingual at the time of its creation, but means of creation of the non-English data was through translating English sourced data, such as Belebele Bandarkar et al. 
(2024), Mintaka (Sen et al., 2022), or X-FACTR (Jiang et al., 2020). Taken together, translated benchmarks span quite a range of tasks, such as question answering (Artetxe et al., 2020; Lewis et al., 2020; Qi et al., 2023; Ohmer et al., 2023), natural language inference (Conneau et al., 2018), paraphrase detection (Zhang et al., 2019), general linguistic competence (Jumelet et al., 2025), reading comprehension (Artetxe et al., 2020; Bandarkar et al., 2024) and commonsense reasoning (Ponti et al., 2020), and even instruction following (He et al., 2024). With the exception of question answering and of course instruction following, however, many of these tasks have gone (somewhat) out of fashion for LLM evaluation, a trend which is mirrored also in the usage of their multilingual counterparts. As mentioned before, translated benchmarks have the advantage of containing parallel data, allowing for some form of comparability across languages, but are English-centric in content and may suffer from translationese (see e.g. Romanou et al., 2024; Chen et al., 2024, for a recent discussion of this)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "Multilingual benchmarks sourced from scratch Though much rarer, there are also benchmarks that are created independently for each language they include. Clark et al. 
(2020) release a question answering dataset separately sourced for 11 different languages, with a protocol relatively similar" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 193 + ], + "type": "text", + "content": "to ours. In a different category, Hardalov et al. (2020), Zhang et al. (2023) and Romanou et al. (2024) and Sánchez et al. (2024) do not create benchmark data, but instead collect existing exam or competition questions from official human exams. In case of Zhang et al. (2023), the exams are graduation exams of primary, middle and high school; Hardalov et al. (2020) includes official state exams taken by graduating high school students, which may contain parallel pairs in case countries allow examinations to be taken in multiple languages; Romanou et al. (2024), cover academic exams at middle and high school and university level, professional certifications and licenses, and exams to obtain regional licenses. Sánchez et al. (2024) instead focus on questions from the International Linguistic Olympiad corpus. Lastly, as part of their study Ohmer et al. (2023) create a dataset called SIMPLE FACTS, containing factual questions created through a shared template filled in with language specific factual data." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 205, + 506, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 315 + ], + "type": "text", + "content": "Consistency evaluation A rather different approach to assess multilinguality in LLMs is to focus not on accuracy across different languages, but to consider whether predictions are consistent across languages. This tests knowledge and skill transfer between languages more explicitly. Two recent examples of studies incorporating consistency-based evaluations on factual knowledge questions are Qi et al. (2023) and Ohmer et al. (2023). Qi et al. (2023) focuseses specifically on sample-level consistency of answers across different languages, requiring existing parallel benchmarks. Ohmer et al. (2023), instead, ask models to translate benchmark questions themselves before answering them again. This can, with some caveats, be applied to any existing monolingual benchmark, but – requiring multiple steps – it is more involved as a paradigm, and is somewhat bottlenecked by the translation ability of the model to be evaluated." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 326, + 504, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 360 + ], + "type": "text", + "content": "Translation as a proxy for multilinguality Another, more implicit method to assess multilinguality in LLMs is to evaluate their ability to translate from one language to another. This approach was famously used by Brown et al. (2020), but has not been common since." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 371, + 506, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 506, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 506, + 492 + ], + "type": "text", + "content": "Monolingual non-English evaluation In our discussion, we have focussed on multilingual evaluation options that cover multiple other languages. After all, a benchmark to evaluate models on Bengali (e.g. Shafayat et al., 2024) or Arabic (e.g. Alwajih et al., 2024) can contribute to multilingual evaluation when combined with other benchmarks, but does not so on its own. Because such benchmarks are usually created by language experts for the respective languages, they usually target locally relevant skills and knowledge and are likely of higher quality than benchmarks created for many languages simultaneously (either through translation or from scratch). Yet, composing a suite including many languages that allows direct comparisons between languages remains challenging. We believe such benchmarks can be important for multilingual evaluation in LLMs, but will not further discuss benchmarks focussing on individual languages or very small sets of languages within one family here." 
+ } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_content_list.json b/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9028bceb2fecd2d05bc5a62a02e42cdb8084d352 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_content_list.json @@ -0,0 +1,5860 @@ +[ + { + "type": "text", + "text": "S1-Bench: A Simple Benchmark for Evaluating System 1 Thinking Capability of Large Reasoning Models", + "text_level": 1, + "bbox": [ + 152, + 89, + 843, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wenyuan Zhang*, Shuaiyi Nie*, Xinghua Zhang, Zefeng Zhang, Tingwen Liu†", + "bbox": [ + 164, + 151, + 835, + 168 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Institute of Information Engineering, Chinese Academy of Sciences", + "bbox": [ + 220, + 168, + 773, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "School of Cyber Security, University of Chinese Academy of Sciences", + "bbox": [ + 213, + 186, + 784, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zhangwenyuan,nieshuaiyi,liutingwen}@iei.ac.cn", + "bbox": [ + 267, + 203, + 731, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 261, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We introduce S1-Bench, a novel benchmark designed to 
evaluate the performance of Large Reasoning Models (LRMs) on simple tasks that favor intuitive system 1 thinking rather than deliberative system 2 reasoning. While LRMs have achieved significant breakthroughs in complex reasoning tasks through explicit chains of thought, their heavy reliance on system 2 thinking may limit their system 1 thinking capabilities. However, there is a lack of an appropriate benchmark for evaluating LRM's system 1 thinking capabilities. To fill this gap, S1-Bench introduces a suite of simple, diverse, and natural questions across multiple domains and languages, specifically designed to assess LRMs' performance on questions more suitable for system 1. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe a gap between their difficulty perception and generation length. Overall, this work paves the way toward dual-system compatibility in the development of LRMs1.", + "bbox": [ + 144, + 288, + 458, + 629 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 640, + 258, + 656 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\"Simplicity is the ultimate sophistication.\"", + "bbox": [ + 109, + 667, + 423, + 682 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "— Leonardo da Vinci", + "bbox": [ + 324, + 682, + 485, + 695 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in Large Reasoning Models (LRMs), notably OpenAI's o1/o3 (OpenAI, 2024) and the DeepSeek-R1 (Guo et al., 2025) series, have propelled the development of Large Language Models (LLMs). 
Unlike traditional LLMs that exhibit intuitive, heuristic system 1 thinking, LRMs demonstrate deliberate and analytical system 2 reasoning (Qu et al., 2025a; Li et al., 2025b) by explicitly generating external chain-of-thought (COT) (Wei et al., 2022) before producing final answers. Through sophisticated strategies such as", + "bbox": [ + 112, + 702, + 489, + 878 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/80662bbff5d908277978fbb98d9032e60f62b37085981bea22146fedc6fbb295.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BenchmarkCross DomainRealistic ScenariosMulti-lingualAcc.
AIMEXX6.67
GPQAX24.94
Olympiad-Bench27.94
AMCXX31.88
MATHXX58.30
MMLUX66.27
GSM8KXX87.45
ASDIVXX97.51
GSM8K-zeroXXX77.98
RoR-BenchXXX14.24
S1-Bench (ours)100.00
", + "bbox": [ + 524, + 259, + 868, + 431 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Table 1: Characteristics of S1-Bench, with \"Acc.\" representing the average accuracy of four 7-9B LLMs. See Appendix A.1 for more details.", + "bbox": [ + 507, + 441, + 882, + 483 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "self-reflection and multi-path exploration (Li et al., 2025a; Yeo et al., 2025), LRMs can achieve strong performance in tasks that require system 2 thinking, including advanced mathematical and competition-level problems (Yang et al., 2025a).", + "bbox": [ + 507, + 516, + 882, + 596 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, there remains a lack of appropriate benchmarks for evaluating LRMs' system 1 thinking capabilities. Not all real-world problems require system 2 reasoning. The capacity to dynamically identify simple questions and address them efficiently contributes to both resource optimization and improved user satisfaction for LRMs. Nevertheless, current benchmarks either overemphasize difficulty, are simple yet lack domain diversity, or are only not hard for humans but involve unrealistic adversarial designs. Table 1 presents a collection of recent benchmarks aimed at mitigating the overthinking in LRMs (Sui et al., 2025). The majority of these benchmarks are of high difficulty. For example, AIME and GPQA (MAA Committees; Rein et al., 2024) achieve less than $30\\%$ accuracy on conventional small LLMs, which are inherently more suitable for system 2 reasoning. Although some simple mathematical benchmarks are easy enough, such as GSM8K and ASDIV (Cobbe et al.,", + "bbox": [ + 507, + 599, + 884, + 921 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10368v3 [cs.CL] 27 May 2025", + "bbox": [ + 21, + 300, + 60, + 722 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* denotes equal contribution.† denotes corresponding author. 
\n1The code and benchmark can be found in https://github.com/WRipple/S1_Bench.", + "bbox": [ + 112, + 885, + 487, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 494, + 942, + 502, + 954 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2021; Miao et al., 2021), they often suffer from limited domain variety. Furthermore, some tasks that pose little challenge to humans but incorporate adversarial elements tend to lack relevance to realistic scenarios, such as GSM8K-zero (Chiang and Lee, 2024), which includes the correct answer in the questions. Thus, a benchmark to assess the system $l$ thinking capability of LRMs is still lacking, further hindering our understanding of LRMs' cognitive flexibility between the two systems (Ziabari et al., 2025; Qu et al., 2025a).", + "bbox": [ + 112, + 84, + 489, + 260 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To fill this research gap, we introduce the System 1 Thinking Capability Benchmark (S1-Bench), which measures the performance of LRMs across various simple tasks that commonly encountered in real-world applications. S1-Bench has the following three characteristics: (1) Simple. The questions are not hard for humans and can be easily answered by LLMs. LLMs with 7-9B parameters can robustly provide correct answers through direct responses when sampled across multiple temperatures. (2) Diverse. S1-Bench is not limited to simple reasoning problems; it encompasses four major categories and 28 subcategories in two languages (English and Chinese), including reasoning problems, commonsense knowledge, instruction following, and analytical problems. (3) Natural. 
The questions are clear, without any misleading elements or ambiguities, ensuring they can be answered intuitively.", + "bbox": [ + 115, + 262, + 489, + 567 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct extensive evaluations on S1-Bench across 28 LRMs, yielding the following key findings: (1) Current LRMs exhibit inefficiency and lack system $I$ thinking capabilities across all types of questions, with average output lengths 15.5 times longer than small LLMs on S1-Bench. (2) Despite employing deep reasoning, several LRMs exhibit under-accuracy and limited robustness on simple questions. (3) LRMs exhibit \"gut moment\" at the beginning of some reasoning processes, showing gut feelings about task difficulty. Yet, even when recognizing a question's simplicity, LRMs often fail to produce shorter responses—revealing a gap between their difficulty awareness and generation behavior. These findings emphasize the significant distance LRMs must traverse to become powerful dual-system compatible models.", + "bbox": [ + 112, + 569, + 489, + 843 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions can be summarized as follows:", + "bbox": [ + 131, + 843, + 489, + 858 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- To the best of our knowledge, S1-Bench is the first benchmark to evaluate the system 1 thinking capabilities of LRMs, which paves", + "bbox": [ + 134, + 873, + 489, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the way for dual-system compatibility.", + "bbox": [ + 544, + 84, + 833, + 99 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a workflow for constructing a simple dataset for system 1 evaluation.", + "- Extensive experiments reveal the inefficiency, under-accuracy, and limited robustness of LRRMs on simple questions.", + "- We find that LRMs exhibit \"gut moment\" on simple problems, and reveal a gap between their difficulty 
perception and generation length." + ], + "bbox": [ + 531, + 101, + 884, + 244 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related work", + "text_level": 1, + "bbox": [ + 509, + 259, + 660, + 274 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Large Reasoning Models", + "text_level": 1, + "bbox": [ + 509, + 286, + 752, + 300 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Reasoning Models (LRMs), characterized by explicitly generating external thinking processes before final answers (Kumar et al., 2025b; Chen et al., 2025a), achieve a paradigm shift from intuitive system 1 thinking to deliberative system 2 reasoning compared to traditional LLMs (Li et al., 2025b; Qu et al., 2025a), thus achieving superior performance on complex tasks. The development of recent LRMs has largely followed two main approaches: large-scale reinforcement learning (RL) and model distillation. Models trained via largescale RL (Guo et al., 2025; Team, 2025b; Team et al., 2025b) leverage reward-based optimization to gradually incentivize deliberative reasoning. In contrast, distillation-based LRMs (OpenAI, 2024; Min et al., 2024; Team, 2025a; Ye et al., 2025; Muennighoff et al., 2025; Zhang et al., 2025b) acquire such abilities by transferring structured reasoning patterns from advanced teacher models.", + "bbox": [ + 507, + 307, + 884, + 613 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Limitations of LRMs", + "text_level": 1, + "bbox": [ + 509, + 626, + 721, + 640 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While LRMs have shown significant performance gains through deliberate reasoning, rigid adherence to this overly cautious thinking can introduce new limitations. 
On one hand, intermediate reasoning steps can cause excessive token generation and unnecessary solving attempts (Chen et al., 2024b; Hashemi et al., 2025; Kumar et al., 2025a), even leading to redundancy in the hidden layers (Chen et al., 2024c, 2025b). On the other hand, LRMs' performance can drop in specific contexts like safety scenarios (Jiang et al., 2025) and role-playing (Feng et al., 2025). However, prior studies mainly evaluated LRMs on complex tasks that are more suited for deliberative system 2 thinking. Our work examines how deliberative reasoning impacts extremely simple problems better matched to intuition-driven system 1 processing.", + "bbox": [ + 507, + 646, + 884, + 921 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 942, + 504, + 954 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d0c4edc2222d9738a5f4311cff4acb0010d5e1a8ac9defd168dd3f480fe62d03.jpg", + "image_caption": [ + "Figure 1: Construction workflow for S1-Bench and an illustrative example from each major category." + ], + "image_footnote": [], + "bbox": [ + 115, + 80, + 559, + 236 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/d56ce89be3cdd44a24ee6c27fcfae6613fd4bde20dc2301bb2a0b429a48b0392.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 80, + 882, + 236 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 S1-Bench", + "text_level": 1, + "bbox": [ + 112, + 286, + 231, + 302 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We introduce S1-Bench, a bilingual, multi-domain benchmark designed to evaluate system 1 thinking capability of LRM on extremely simple questions. These questions are easily solvable by traditional LLMs and not hard for humans. 
S1-Bench, which covers both English and Chinese, is organized into four major categories: reasoning (RSN), knowledge (KNO), instruction following (IF) and analysis (ANA), representing major dimensions commonly employed in LLM capability evaluation (Zheng et al., 2023; Chang et al., 2024).", + "bbox": [ + 112, + 312, + 487, + 488 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section begins with how simplicity is ensured, then the detailed construction workflow for S1-Bench, and concludes with an overview of the dataset statistics. Figure 1 shows the construction workflow and an illustrative example per category.", + "bbox": [ + 112, + 489, + 489, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 How to Ensure Simplicity?", + "text_level": 1, + "bbox": [ + 112, + 581, + 371, + 596 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We ensure questions are simple and suitable for system 1 thinking through the following two aspects.", + "bbox": [ + 112, + 602, + 489, + 634 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.1 A Priori Simplicity Constraints", + "text_level": 1, + "bbox": [ + 112, + 644, + 420, + 659 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We begin by generating question-answer pairs through collaboration between humans and LLMs. Each pair is required to satisfy both the general and the category-specific simplicity criteria.", + "bbox": [ + 112, + 663, + 487, + 727 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The general simplicity criteria requires that: (1) Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps. (2) Answers must be unique or easily falsifiable (e.g., providing a three-letter English word).", + "bbox": [ + 112, + 728, + 487, + 807 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The category-specific simplicity criteria are as follows. 
RSN: Limited to problems solvable with minimal reasoning or intuition. KNO: Restricted to common knowledge with unique, verifiable answers from sources like Wikipedia. IF: Involve straightforward instructions without strict formatting requirements. ANA: Limited to questions", + "bbox": [ + 112, + 809, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "whose answers can be directly inferred from the prompt, such as binary classification. These constraints ensure all questions remain straightforward for human respondents.", + "bbox": [ + 507, + 287, + 884, + 351 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.2 A Posteriori Simplicity Verification", + "text_level": 1, + "bbox": [ + 507, + 363, + 843, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Due to the biases existing between language models and humans (Gallegos et al., 2024), questions that are simple for humans may be difficult for LLMs. Therefore, we introduce additional posteriori verification to ensure that questions are simple enough to be correctly and robustly answered by smaller LLMs from different families.", + "bbox": [ + 507, + 382, + 884, + 494 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Construction Workflow", + "text_level": 1, + "bbox": [ + 507, + 508, + 741, + 523 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Subcategory Preparation. To ensure diversity, we refer to the subcategories included in existing benchmarks (e.g., MMLU, IFEval, and GSM8K) and evaluation surveys (Chang et al., 2024) to select, merge, or design subcategories for S1-bench, ensuring that each meets the simplicity requirements. The definition and example question for each subcategory can be found in Appendix A.2.", + "bbox": [ + 507, + 531, + 882, + 658 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Implementation of A Priori Simplicity. 
First, we use two data generators² to create 100 initial bilingual question-answer pairs for each candidate subcategory. The data generation prompt (see Appendix A.3) explicitly incorporates the subcategory definitions, along with both the general and category-specific simplicity criteria, while also aiming to ensure diversity in the generated questions. Second, these question-answer pairs are then independently evaluated by three annotators and two quality discriminators³ according to the general and category-specific simplicity criteria (see Appendix A.3 for prompt of discriminators), resulting in five evaluation outcomes per pair. The three an", + "bbox": [ + 507, + 661, + 884, + 885 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "2We select Claude-3.7-Sonnet and Qwen2.5-72B-Instruct.", + "bbox": [ + 527, + 896, + 843, + 907 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "3We select GPT-4o and DeepSeek-V3-241226.", + "bbox": [ + 529, + 908, + 781, + 919 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 492, + 942, + 504, + 953 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/56b5c8368bf0c329ab942c0cfe68ef542389105adef4c16bb5dc358294695d92.jpg", + "image_caption": [ + "Figure 2: Statistical distribution of token counts for S1-Bench questions." + ], + "image_footnote": [], + "bbox": [ + 131, + 87, + 473, + 192 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "notators are experienced graduate students familiar with LLMs and well-acquainted with the goals of S1-Bench. Finally, based on these evaluation outcomes, three annotators discuss and collectively decide whether to retain, modify, or discard each question.", + "bbox": [ + 112, + 263, + 487, + 357 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Implementation of A Posteriori Simplicity. 
First, each question obtained from the previous stage is input into the small LLM $\\text{validators}^4$ with 7~9 B parameters. For each question, we sample 10 answers at three different temperature settings (0, 0.2, and 0.4), resulting in a total of 30 responses per question. These responses are then individually evaluated for correctness using GPT-4o. Second, if all 30 sampled responses are correct, the question is accepted into S1-Bench. Otherwise, the question is returned to the generators, where a difficulty-reduction prompt (see Appendix 10) is applied to simplify it. The simplified questions then undergoes the same subsequent process. Finally, questions fail to meet the full-accuracy criterion (i.e., 30 out of 30 correct) after three rounds of difficulty reduction are excluded from the workflow.", + "bbox": [ + 115, + 360, + 489, + 632 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The final S1-Bench comprises questions that satisfy both human-based a priori simplicity constraints and LLM-based a posteriori simplicity verification.", + "bbox": [ + 112, + 633, + 489, + 697 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Benchmark Statistics", + "text_level": 1, + "bbox": [ + 114, + 709, + 327, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "S1-Bench comprises 422 question-answer pairs across four major categories and 28 subcategories, balanced with 220 English and 202 Chinese questions. Figure 2 shows the token length distribution, with questions averaging 14.46 tokens. To ensure that the a posteriori verification process does not introduce simplicity only tailored to the small LLM validator, we evaluate S1-Bench on five additional LLMs and on Qwen3 Family with reasoning modes disabled. 
As shown, even the 1.7B model achieves", + "bbox": [ + 112, + 730, + 487, + 890 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/459125bdd88435e5e17571c620196788d6db9d5c1a0cd55e63d205d33c29cd05.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Modelt=0.0t=0.2t=0.4Tokens
Gemma2-9B100.00100.00100.0038.77
Llama3.1-8B100.00100.00100.0042.00
Mistral-8B100.00100.00100.0044.38
Qwen2.5-7B100.00100.00100.0042.81
DeepSeek-v3100.00100.00100.0079.53
Llama3.3-70B100.0099.7699.7653.71
Qwen2.5-14B99.7499.7699.7640.00
Qwen2.5-32B99.9899.9899.9843.17
Qwen2.5-72B100.00100.00100.0044.61
Qwen3-32B (w/o think)100.00100.00100.00103.30
Qwen3-14B (w/o think)100.00100.00100.0086.35
Qwen3-8B (w/o think)100.00100.0099.7690.54
Qwen3-1.7B (w/o think)98.1097.1695.73114.32
", + "bbox": [ + 510, + 80, + 882, + 266 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 2: Average accuracy (acc@k) and response token count of different LLMs, each sampled 10 times at three temperature settings.", + "bbox": [ + 507, + 275, + 882, + 318 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "over $98\\%$ accuracy at temperature 0.", + "bbox": [ + 507, + 344, + 781, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Main Experiment", + "text_level": 1, + "bbox": [ + 507, + 370, + 697, + 387 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Baseline Models and Configurations", + "text_level": 1, + "bbox": [ + 507, + 397, + 838, + 413 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We evaluated 28 different LRMs, which are explicitly trained to first respond with a thinking process, and then generate a final answer. These LRMs include open-source families, such as DeepSeek (Guo et al., 2025), Qwen (Yang et al., 2025a), Nemotron (Bercovich et al., 2025), LightR1 (Wen et al., 2025), s1.1 (Muennighoff et al., 2025), EXAONE (Research et al., 2025), and SkyT1 (Griggs et al., 2025), as well as closed-source Hunyuan-T1 (Tencent, 2025), spanning from tiny (1.5B) to large (671B) parameter sizes5. Notably, OpenAI's o-series models are not included as they do not disclose thinking processes to users. For each model, we consider two sets of generation configurations: Greedy sampling with temperature $t = 0$ ; Top-p sampling with temperature $t = 0.6$ , topp=0.95 and sampling size $k = 5$ . Only top-p sampling results are reported in the main text; greedy decoding results are provided in the Appendix C.1.", + "bbox": [ + 505, + 417, + 885, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Evaluation Metrics", + "text_level": 1, + "bbox": [ + 507, + 734, + 705, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Format Metrics. 
To assess the formatting quality of LRM responses, we compute the proportion of responses that satisfy the following two formatting criteria (averaged over 5 runs for top-p sampling). S-Corr (Strict Format Correctness Rate): In general, an end thinking marker (e.g., $\\langle$ /think $\\rangle$ ) is expected to separate the thinking process from the non-empty final answer. S-Corr measures the proportion of responses that satisfy this criterion.", + "bbox": [ + 505, + 753, + 882, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "4We select four small LLMs: Qwen2.5-7B, Llama3.1-8B, Mistral8B, and Gemma2-9B. The full model IDs are detailed in Table B.1.", + "bbox": [ + 112, + 898, + 487, + 919 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "Model details are presented in Appendix B.1.", + "bbox": [ + 527, + 907, + 781, + 920 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 492, + 942, + 504, + 954 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/9efac38809cbfd25fe32a6229d1c217cbb74c40a384103a7d9451c156c39c7ab.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model IDSizeLoose FormatStrict FormatL-Corr ↑S-Corr ↑Tokens ↓
pass@1↑acc@k↑pass@1↑acc@k↑
Validator LLMs7-9B100.00100.00100.00100.00--42.00
Qwen3-A22B235B99.9199.7699.9199.76100.00100.00701.65
Qwen3-A3B30B99.9599.7699.9599.76100.00100.00638.40
QwQ-32B32B100.00100.00100.00100.00100.00100.00720.10
Qwen3-32B32B99.9199.5399.9199.5399.9199.91668.69
Qwen3-14B14B99.9599.7699.9599.7699.9599.95582.99
Qwen3-8B8B99.9599.7699.9599.7699.9599.95657.76
Qwen3-1.7B1.7B99.3497.3999.3497.3999.8199.81595.90
Hunyuan-T1-99.9199.5399.9199.53100.00100.00542.31
DS-R1671B100.00100.00100.00100.00100.00100.00646.40
DS-R1-70B70B99.4897.3999.3896.92100.0099.91453.81
DS-R1-32B32B99.7298.8299.7298.82100.00100.00429.91
DS-R1-14B14B99.5797.8799.5797.87100.00100.00475.46
DS-R1-8B8B97.4497.1697.3997.1699.7699.53452.11
DS-R1-7B7B95.2185.7895.2185.7899.2499.24454.55
DS-R1-1.5B1.5B81.4754.5081.4754.5097.5897.58489.54
Sky-T1-32B32B98.8294.7994.8879.6299.4895.26163.00
Nemotron-49B49B99.1597.3999.1597.39100.00100.00362.54
Nemotron-8B8B86.1669.9179.8159.0099.4384.31372.57
L-R1-32B32B97.8791.0094.7479.6298.9195.071095.36
L-R1-32B-DS32B99.5798.1099.5798.1099.8199.81524.12
L-R1-14B-DS14B99.0595.9799.0595.9799.1999.19693.19
L-R1-7B-DS7B94.6483.6594.6483.6599.7699.67496.47
s1.1-32B32B99.5398.3499.4898.1099.5799.53998.00
s1.1-14B14B97.6393.6097.2591.9497.7797.39839.86
s1.1-7B7B96.6888.3988.5863.9897.1188.96711.49
EXAONE-32B32B97.0694.0897.0694.0899.8199.81800.56
EXAONE-7.8B7.8B88.1575.1287.8274.4198.7298.061046.87
EXAONE-2.4B2.4B72.4256.1672.3256.1697.4497.251593.96
", + "bbox": [ + 196, + 82, + 801, + 399 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 3: Main results in the top-p sampling setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst.", + "bbox": [ + 112, + 409, + 882, + 439 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "L-Corr (Loose Format Correctness Rate): LRMs may occasionally generate responses with endless thinking. L-Corr quantifies the proportion of responses that do not exhibit this failure mode. Detailed format types are given in Appendix B.4.", + "bbox": [ + 112, + 463, + 487, + 545 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Efficiency Metrics. We calculate the average token counts for responses (Tokens) except for those generate endless thinking. Token counts are obtained using the Qwen2.5 tokenizer.", + "bbox": [ + 112, + 565, + 489, + 630 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Accuracy Metrics. We calculate accuracy metrics under both strict and loose formatting requirements, respectively. We use GPT-4o as the evaluator to assess the correctness of the responses6, with the evaluation prompt in Appendix B.2. For greedy sampling, we directly calculate the accuracy rate. For top-p sampling, we utilize two metrics: Pass@1 and Acc@k. Pass@1 follows DeepSeekR1 (Guo et al., 2025), and Acc@k is the percentage of questions with all k answers correct. The two metrics use k=5, and their detailed definitions can be found in Appendix B.3. 
Notably, S-Corr \ L-Corr represents the upper bound for pass@1 and acc@5 under strict \ loose formatting requirements.", + "bbox": [ + 112, + 651, + 489, + 876 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3 Main Results", + "text_level": 1, + "bbox": [ + 507, + 463, + 660, + 478 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 3 and Figure 8 present the main results of LRMs on S1-Bench, revealing two key phenomena.", + "bbox": [ + 507, + 488, + 882, + 520 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "LRMs exhibit significantly lower efficiency than LLMs on S1-Bench, and no clear correlation is observed between ART and model size. We observed the following: First, state-of-the-art LRMs, such as DeepSeek-R1 and Qwen3, do not demonstrate a distinct advantage in efficiency. In contrast, Sky-T1-32B, which undergoes specific optimizations to mitigate overthinking using SimPO, achieves the highest efficiency. Second, the L-R1-DS 7B/14B/32B models are further post-trained from the DS-R1-7B/14B/32B models. The L-R1-DS models tend to produce longer responses, suggesting that while additional post-training may enhance the model's capability for complex reasoning, it comes at the cost of response efficiency. Finally, the s1.1 models generate considerably longer responses than the DeepSeek-R1-Distilled models. Despite both models being trained solely with SFT to acquire long-COT reasoning ability, the DeepSeek-R1-Distilled models use 800K training samples, while the s1.1 models are trained on only 1K. 
This discrepancy suggests that the smaller training set may lead to superficial imitation of long reasoning patterns, resulting in verbose thinking on", + "bbox": [ + 507, + 535, + 884, + 921 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "If a final answer can be isolated, only the final answer is evaluated; otherwise, the entire response is assessed.", + "bbox": [ + 112, + 898, + 487, + 921 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 942, + 504, + 954 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/502c944225ec8d466199e1ad359f3b995cee1bf14968dec9ed6c33126c4de2dc.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model IDSizeS1-Bench-ENS1-Bench-ZHAvg
RSNKNOIFANAAvgRSNKNOIFANAAvg
Gemma2-9B9B74.829.45.352.445.951.619.87.535.131.038.8
Llama3.1-8B8B91.035.412.461.956.044.028.315.218.726.742.0
Qwen2.5-7B7B65.546.36.449.646.550.546.69.836.938.842.8
Mistral-8B8B67.255.58.650.149.647.356.114.829.738.744.4
Column Avg-74.641.68.253.549.548.337.711.830.133.842.0
Sky-T1-32B32B215.8174.198.5233.3194.3125.5125.399.4145.5128.9163.0
Nemotron-49B49B599.7587.6396.5526.1540.4232.9157.3235.5107.8168.8362.5
Nemotron-8B8B561.0585.1458.0303.1462.6369.5326.0288.1166.7273.5372.6
DS-R1-32B32B421.8504.4414.7521.1473.7362.2385.6343.1408.8382.2429.9
DS-R1-8B8B472.2528.9530.7462.7491.2521.9404.4266.2395.5409.4452.1
DS-R1-70B70B464.1501.3378.5536.1484.0450.8450.2328.4416.7420.9453.8
DS-R1-7B7B447.5623.9353.8510.0495.5446.5463.2339.5373.0409.4454.5
DS-R1-14B14B503.7674.7367.3494.2519.0452.0465.4375.3405.8428.0475.5
DS-R1-1.5B1.5B480.8584.7417.4577.2529.1493.0497.4329.8423.1446.0489.5
L-R1-7B-DS7B568.1667.1501.7566.3580.3444.8454.6344.1366.4405.0496.5
L-R1-32B-DS32B574.5706.6647.6632.8636.3431.2367.0377.1418.7402.2524.1
Hunyuan-T1-561.6693.8380.9435.0521.2676.8553.8505.1523.8565.3542.3
Qwen3-14B14B700.4639.5286.2575.0579.8730.4557.2403.1586.0586.5583.0
Qwen3-1.7B1.7B790.4720.6399.9526.2624.6689.8563.6406.4545.9564.7595.9
Qwen3-A3B30B745.0729.3328.1594.8625.7773.7655.8453.7648.6652.2638.4
DS-R1671B786.1723.8711.4529.2672.5727.3638.5607.9533.9617.9646.4
Qwen3-8B8B853.7753.1394.4629.5683.2749.2623.8459.3624.0630.0657.8
Qwen3-32B32B805.7774.2356.9645.5674.7780.2695.2446.6645.3662.1668.7
L-R1-14B-DS14B951.01026.0829.8653.5848.2594.7610.1442.2451.7525.7693.2
Qwen3-A22B235B925.3864.3487.2605.7734.5803.3713.4487.2611.3665.9701.7
s1.1-7B7B1039.5840.81923.2529.4929.9489.6351.31034.3332.4475.6711.5
QwQ-32B32B873.3808.1520.8634.7722.4866.9707.3613.3667.7717.6720.1
EXAONE-32B32B1323.71057.61537.0711.61086.4703.2348.61302.9125.5490.3800.6
s1.1-14B14B871.8746.22233.1708.1960.2654.6546.01512.6579.7710.7839.9
s1.1-32B32B1077.9889.72055.4781.71081.7995.6765.21634.6666.5906.5998.0
EXAONE-7.8B7.8B1498.31398.91775.7882.41303.81410.3497.81633.1205.0767.01046.9
L-R1-32B32B1614.01217.31996.9930.11338.31035.6737.71240.7610.2835.31095.4
EXAONE-2.4B2.4B1927.31426.21200.1825.71320.72469.71622.62471.61511.21898.71594.0
Column Avg-809.1766.0785.1591.5718.4695.8545.9677.9482.0576.7650.3
Improvement-×10.8×18.4×96.0×11.1×14.5×14.4×14.5×57.3×16.0×17.1×15.5
", + "bbox": [ + 159, + 80, + 836, + 504 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4: Average response tokens in the top-p sampling setting on the S1-bench across two languages and four main categories. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst. Bold represents the maximum Improvement value for each language.", + "bbox": [ + 112, + 514, + 882, + 558 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "simple questions.", + "bbox": [ + 112, + 583, + 247, + 599 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Several LRMs exhibit under-accuracy and limited robustness on simple questions. First, our observations find that, despite employing deep reasoning, most LRMs tend to exhibit lower accuracy on simple questions compared to traditional LLMs. For example, DS-R1-1.5B and EXAONE-2.4B achieve just above $50\\%$ acc@k. Second, many LRMs struggle with robust correctness in top-p sampling, where acc@k is significantly lower than pass@1. This issue is particularly pronounced in smaller LRMs. For instance, DS-R1-1.5B achieved $81.47\\%$ pass@1 but only $54.50\\%$ acc@k.", + "bbox": [ + 112, + 612, + 489, + 804 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Efficiency Analysis", + "text_level": 1, + "bbox": [ + 112, + 821, + 314, + 835 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Analysis across Question Types", + "text_level": 1, + "bbox": [ + 112, + 850, + 405, + 866 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To better understand the efficiency differences of LRRMs across question types, we analyze the average response tokens across 4 main categories, 28", + "bbox": [ + 112, + 873, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "subcategories, and two languages. 
The results are displayed in Table 4 and Appendix C.2.", + "bbox": [ + 507, + 583, + 880, + 615 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "LRMs exhibit a substantial increase in response length across all four major categories, 28 subcategories, and two languages. As shown in Table 4, for each of the four major categories, the average response length of LRMs exceeds that of LLMs by more than a factor of ten. Response lengths also increase significantly across all subcategories (see Appendix C.2). This suggests that while LRMs are primarily trained on reasoning data to produce long CoT style responses, this stylistic pattern generalizes well across a wide range of question types.", + "bbox": [ + 507, + 623, + 882, + 816 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Moreover, 23 out of the 28 LRMs produce longer responses to questions in English than Chinese.", + "bbox": [ + 507, + 816, + 880, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "LRMs exhibit the most significant increase in ART for instruction following questions and tend to over-explore when the solution space is vast. As shown in Table 4, although small LLMs", + "bbox": [ + 507, + 857, + 880, + 920 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 942, + 505, + 954 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7a3795cc2e55a9b576ec292b5aa56d5cdc6ea397ea09993746b001df9df3ab87.jpg", + "image_caption": [ + "Figure 3: (a) Comparison of first round and additional token costs for each LRM. (b) Distribution of solution rounds for each LRM." + ], + "image_footnote": [], + "bbox": [ + 122, + 82, + 480, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "provide the most concise responses to instruction following questions, LRMs generate dramatically longer outputs—96.0 times longer in English and 57.3 times longer in Chinese than small LLMs. 
To investigate the cause, we further analyze the subcategories of instruction following questions. As shown in Appendix C.2, average tokens is notably longer in the subcategories of length constraints, character constraints, and sentence constraints. These three question types share a similar characteristic: their correctness is easy to verify, but the solution space is vast. We find that, although the model quickly identifies a correct answer, it becomes trapped in the search space, continually exploring alternatives and failing to stop in time. A case can be seen in Table 21. This phenomenon is more pronounced in families with lower efficiency, such as s1.1 and EXAONE.", + "bbox": [ + 112, + 366, + 489, + 653 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2 Thinking Solution Analysis", + "text_level": 1, + "bbox": [ + 112, + 675, + 376, + 690 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To better understand the causes of inefficiency in LRMs on S1-Bench, we analyze the solution rounds of their thinking processes7. We first use DeepSeek-v3 to segment each thinking process into several solutions, each defined as a point at which LRMs explicitly arrives at a conclusion that matches the correct answer. We then compute the average token counts in the first solution. The detailed experimental setup is provided in Appendix C.3. Our analysis reveals the following:", + "bbox": [ + 112, + 701, + 489, + 860 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/165f30c01295e8eb67f8483f5b0550795a5819862b0e4f5ca8b4d0a0d987a2ac.jpg", + "image_caption": [ + "Figure 4: Distribution of the thinking process across four categories. FA and TP refer to Final Answer and Thinking Process, respectively. Green bars indicate cases where the final answer is correct, while red bars indicate cases where it is incorrect." 
+ ], + "image_footnote": [], + "bbox": [ + 517, + 84, + 875, + 218 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The token consumed in the first solution of LRMs significantly exceeds that of validator LLMs, as shown in Figure 3 (a). This suggests that LRMs may involve unnecessary reasoning steps in each solution, which could be one of the reasons for their inefficiency.", + "bbox": [ + 507, + 330, + 884, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The primary reason for efficiency gaps between LRMs lies in the number of redundant solution rounds they generate, rather than the token cost in the initial round. As shown in Figure 3 (a), although total thinking token counts vary widely across LRMs, their token counts in the initial round are similar and only account for a small fraction of the total. Figure 3 (b) further shows the distribution of solution rounds on S1-Bench, revealing that LRMs with longer thinking processes tend to generate more solution round, and this redundancy greatly increases computational cost. Furthermore, further experiments reveal that the redundancy in the reasoning process gradually increases over time. Appendix C.4 presents the experimental details.", + "bbox": [ + 507, + 439, + 884, + 682 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 Error Analysis", + "text_level": 1, + "bbox": [ + 507, + 699, + 672, + 715 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This section analyzes the errors made in the thinking process. Specifically, we utilize DeepSeekv3 to categorize the responses of LRMs into four cases and compute the corresponding proportions: (1) Final answer correct; thinking process entirely accurate. (2) Final answer correct; thinking process contains intermediate errors. (3) Final answer incorrect; correct answer mentioned in thinking process. (4) Final answer incorrect; correct answer never mentioned in thinking process. 
The classification details are in Appendix C.5; results are shown in Figure 4. Key findings include:", + "bbox": [ + 505, + 728, + 884, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "7We only analyze well-formatted thinking processes with correct final answers, as incorrect answers make it unclear whether LRMs are over-reasoning or under-reasoning, and malformed thinking processes cannot be precisely extracted.", + "bbox": [ + 112, + 878, + 487, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 941, + 504, + 953 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/36f5f6a81bdd9b531a8ac7893e1e2c26b7fb338fe001e3626140dc3806b85f1b.jpg", + "image_caption": [ + "Figure 5: Top: Count of \"gut moments\" across models. Bottom: Probability of \"gut moments\" by question type." + ], + "image_footnote": [], + "bbox": [ + 129, + 84, + 473, + 281 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Lower-accuracy LRMs tend to produce less reliable reasoning chains; even when they arrive at the correct final answer, their intermediate steps often contain errors (light green). LRMs with high accuracy (e.g., DS-R1) show almost no flawed reasoning steps, whereas those with lower accuracy (e.g., DS-R1-1.5B) often generate incorrect intermediate conclusions, further indicating that they lack robust reasoning ability.", + "bbox": [ + 112, + 347, + 489, + 491 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Although LRMs sometimes mention the correct answer during reasoning, they may deviate and ultimately produce incorrect final answers (light red). In one case, the LRM initially arrived at the correct answer but undermined it through excessive verification, a case can be seen in Table 24. 
In another case, the LRM directly denies the correct answer, a case can be seen in Table 23.", + "bbox": [ + 112, + 500, + 489, + 629 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 Gut Moment", + "text_level": 1, + "bbox": [ + 112, + 640, + 262, + 655 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We observe an intriguing phenomenon on S1-Bench: LRMs sometimes show an early sense of question difficulty before solving, which we call the \"gut moment.\" To explore this phenomenon, we prompt GPT-4o to classify the initial part of model responses (before the first \"\\n\\ntypes based on its comment on difficulty: easy, neutral, difficult, and no comment. Figure 5 presents these classifications and their probabilities across four question types. Experimental details and cases are in Appendix C.6. This leads to the following observations:", + "bbox": [ + 112, + 665, + 489, + 854 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "First, all LRMs show the \"gut moment\" phenomenon to varying degrees, which is more evident", + "bbox": [ + 112, + 858, + 489, + 890 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a5c4a6d06d561fec23d5e14de192381c1db489b74d1bad7bd2dc349cd4dde9ae.jpg", + "image_caption": [ + "Figure 6: Average response tokens in the easy category vs. all samples. Dots show difference: easy minus all." + ], + "image_footnote": [], + "bbox": [ + 521, + 85, + 870, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "in the Qwen, DeepSeek, and Light-R1 families and Hunyuan-T1. Second, LRMs show stylistic differences in expressing \"gut moment.\" For example, the Qwen family often views questions as simple, whereas the DeepSeek-distilled models show more diverse difficulty comments. Third, some LRMs show significantly stronger \"gut moment\" in Chinese than in English, such as the Qwen and DeepSeek families, likely due to a higher proportion of Chinese in their training data. 
Finally, the \"gut moment\" is most evident in reasoning questions and rarely appears in analytical questions, except in DeepSeek-distilled models.", + "bbox": [ + 505, + 277, + 884, + 487 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To investigate whether the early sense of a question as \"easy\" leads to a corresponding reduction in response length, we compare the average response tokens for questions in the easy category versus all samples. The results are shown in Figure 6. Except for L-R1-32B, other LRMs do not exhibit a noticeable decrease in response length when questions are viewed as \"easy\"; in fact, 21 out of 28 LRMs showed an increase in response length under this condition. This suggests a discrepancy between the LRM's initial sense of difficulty and its generative behavior, the causes and improvements of which warrant further investigation.", + "bbox": [ + 507, + 491, + 882, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 725, + 640, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper introduces S1-Bench, the first benchmark designed to evaluate system 1 thinking capabilities in LRMs. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe \"gut moment\" and find a gap between their difficulty perception and generation length. 
Overall, this work paves the way toward dual-system compatibility in the development of LRMs.", + "bbox": [ + 505, + 760, + 884, + 921 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "8Derived from \"gut feeling,\" meaning intuition-based judgment without analysis.", + "bbox": [ + 112, + 898, + 487, + 920 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 492, + 942, + 504, + 954 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 114, + 84, + 220, + 99 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Although S1-Bench pioneers the evaluation of system 1 thinking in LRMs, it still has several limitations. First, due to our emphasis on ensuring the uniqueness of each sample during dataset construction—for instance, including only one question for basic arithmetic operations such as addition, subtraction, and multiplication—the overall scale of the benchmark remains limited. As a next step, we plan to expand the scale of S1-Bench. Second, while recent months have seen a surge in newly released open-source LRMs, we have only evaluated 28 representative models and have not covered the full spectrum of available models. Lastly, we do not propose methods to improve the efficiency of LRMs on system 1 tasks in this work; this will be the focus of our future research.", + "bbox": [ + 112, + 111, + 492, + 367 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 397, + 213, + 413 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697.", + "AI-MO. 2024. Amc 2023.", + "Daman Arora and Andrea Zanette. 2025. Training language models to reason efficiently. 
arXiv preprint arXiv:2502.04463.", + "Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. 2025. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179.", + "Akhiad Bercovich, Itay Levy, Izik Golan, Mohammad Dabbah, Ran El-Yaniv, Omri Puny, Ido Galil, Zach Moshe, Tomer Ronen, Najeeb Nabwani, Ido Shahaf, Oren Tropp, Ehud Karpas, Ran Zilberstein, Jiaqi Zeng, Soumye Singhal, Alexander Bukharin, Yian Zhang, Tugrul Konuk, and 113 others. 2025. Llamameton: Efficient reasoning models. Preprint, arXiv:2505.00949.", + "Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, and 1 others. 2024. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45.", + "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 2025a. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567." + ], + "bbox": [ + 115, + 420, + 489, + 919 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xiaoshu Chen, Sihang Zhou, Ke Liang, and Xinwang Liu. 2024a. Distilling reasoning ability from large language models with adaptive thinking. arXiv preprint arXiv:2404.09170.", + "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Quzhi Liu, Mengfei Zhou, Zhuosheng Zhang, and 1 others. 2024b. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187.", + "Yilong Chen, Junyuan Shang, Zhengyu Zhang, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2024c. Mixture of hidden-dimensions transformer. 
arXiv preprint arXiv:2412.05644.", + "Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2025b. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842.", + "Cheng-Han Chiang and Hung-yi Lee. 2024. Overreasoning and redundant calculation of large language models. In Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers), pages 161-169, St. Julian's, Malta. Association for Computational Linguistics.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, and 1 others. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168.", + "Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, and 1 others. 2025. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260.", + "Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, and 1 others. 2025. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235.", + "Xiachong Feng, Longxu Dou, and Lingpeng Kong. 2025. Reasoning does not necessarily improve roleplaying ability. arXiv preprint arXiv:2502.16940.", + "Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. 2024. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993.", + "Yichao Fu, Junda Chen, Yonghao Zhuang, Zheyu Fu, Ion Stoica, and Hao Zhang. 2025. Reasoning without self-doubt: More efficient chain-of-thought through certainty probing. In ICLR 2025 Workshop on Foundation Models in the Wild." 
+ ], + "bbox": [ + 510, + 85, + 884, + 919 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 942, + 504, + 953 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Isabel O Gallegos, Ryan A Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K Ahmed. 2024. Bias and fairness in large language models: A survey. Computational Linguistics, 50(3):1097-1179.", + "Tyler Griggs, Shiyi Cao, Dacheng Li, Shu Liu, Shishir G. Patil, Matei Zaharia, Joey Gonzalez, and Ion Stoica. 2025. Think less, achieve more: Cut reasoning costs by $50\\%$ without sacrificing accuracy.", + "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948.", + "Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. 2024. Token-budget-aware llm reasoning. arXiv preprint arXiv:2412.18547.", + "Masoud Hashemi, Oluwanifemi Bamgp Bose, Sathwik Tejaswi Madhusudhan, Jishnu Sethumadhavan Nair, Aman Tiwari, and Vikas Yadav. 2025. Dna bench: When silence is smarter-benchmarking over-reasoning in reasoning llms. arXiv preprint arXiv:2503.15793.", + "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, and 1 others. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008.", + "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2021a. Measuring massive multitask language understanding. 
In International Conference on Learning Representations.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874.", + "Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. 2025. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296.", + "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2025. Livecodebench: Holistic and contamination free evaluation of large language models for code. In The Thirteenth International Conference on Learning Representations." + ], + "bbox": [ + 115, + 85, + 489, + 917 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. 2025. Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025.", + "Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. 2025a. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, pages arXiv-2502.", + "Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025b. Llm post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321.", + "Ayeong Lee, Ethan Che, and Tianyi Peng. 2025. How well do llms compress their own chain-of-thought? a token complexity approach. arXiv preprint arXiv:2503.01141.", + "Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Eric Tang, Sumanth Hegde, Kourosh Hakhamaneshi, Shishir G Patil, Matei Zaharia, and 1 others. 2025a. 
Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374.", + "Yiwei Li, Peiwen Yuan, Shaoxiong Feng, Boyuan Pan, Xinglin Wang, Bin Sun, Heda Wang, and Kan Li. 2024. Escape sky-high cost: Early-stopping self-consistency for multi-step reasoning. arXiv preprint arXiv:2401.10480.", + "Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Ji-axin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, and 1 others. 2025b. From system 1 to system 2: A survey of reasoning large language models. arXiv preprint arXiv:2502.17419.", + "Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. 2025. Reward-guided speculative decoding for efficient ltm reasoning. arXiv preprint arXiv:2501.19324.", + "Ximing Lu, Seungju Han, David Acuna, Hyunwoo Kim, Jaehun Jung, Shrimai Prabhumoye, Niklas Muennighoff, Mostofa Patwary, Mohammad Shoeybi, Bryan Catanzaro, and 1 others. 2025. Retro-search: Exploring untaken paths for deeper and efficient reasoning. arXiv preprint arXiv:2504.04383.", + "Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. 2025. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570.", + "Wenjie Ma, Jingxuan He, Charlie Snell, Tyler Griggs, Sewon Min, and Matei Zaharia. 2025a. Reasoning models can be effective without thinking. arXiv preprint arXiv:2504.09858." + ], + "bbox": [ + 510, + 85, + 882, + 920 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 489, + 942, + 510, + 954 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. 2025b. Cot-valve: Length-compressible chain-of-thought tuning. arXiv preprint arXiv:2502.09601.", + "MAA Committees. Aime problems and solutions. 
https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions.", + "Shen-Yun Miao, Chao-Chun Liang, and Keh-Yih Su. 2021. A diverse corpus for evaluating and developing english math word problem solvers. arXiv preprint arXiv:2106.15772.", + "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, and 1 others. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393.", + "Tergel Munkhbat, Namgyu Ho, Seo Hyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. 2025. Self-training elicits concise reasoning in large language models. URL https://arxiv.org/abs/2502.20122.", + "OpenAI. 2024. Learning to reason with LLMs. https://openai.com/index/learning-to-reason-with-11ms/.", + "Rui Pan, Yinwei Dai, Zhihao Zhang, Gabriele Oliaro, Zhihao Jia, and Ravi Netravali. 2025. Specreason: Fast and accurate inference-time compute via speculative reasoning. arXiv preprint arXiv:2504.07891.", + "Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. 2021. Are nlp models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2080-2094.", + "Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, and 1 others. 2025a. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614.", + "Yuxiao Qu, Matthew YR Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. 2025b. 
Optimizing test-time compute via meta reinforcement finetuning. arXiv preprint arXiv:2503.07572.", + "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2024. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling." + ], + "bbox": [ + 115, + 85, + 485, + 919 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "LG Research, Kyunghoon Bae, Eunbi Choi, Kibong Choi, Stanley Jungkyu Choi, Yemuk Choi, Seokhee Hong, Junwon Hwang, Hyojin Jeon, Kijeong Jeon, and 1 others. 2025. Exaone deep: Reasoning enhanced language models. arXiv preprint arXiv:2503.12524.", + "Jianshu She, Zhuohao Li, Zhemin Huang, Qi Li, Peiran Xu, Haonan Li, and Qirong Ho. 2025. Hawkeye: Efficient reasoning with model collaboration. arXiv preprint arXiv:2504.00424.", + "Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. 2025a. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472.", + "Zhenyi Shen, Hanqi Yan, Linhai Zhang, Zhanghao Hu, Yali Du, and Yulan He. 2025b. Codi: Compressing chain-of-thought into continuous space via self-distillation. arXiv preprint arXiv:2502.21074.", + "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and 1 others. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419.", + "Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. 2019. Commonsenseqa: A question answering challenge targeting commonsense knowledge. 
In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4149-4158.", + "Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. 2025. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233.", + "Kimi Team, A Du, B Gao, B Xing, C Jiang, C Chen, C Li, C Xiao, C Du, C Liao, and 1 others. 2025a. Kimi k1. 5: Scaling reinforcement learning with llms. URL https://arxiv.org/abs/2501.12599.", + "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025b. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599.", + "NovaSky Team. 2025a. Sky-t1: Train your own o1 preview model within $450. https://novasky-ai.github.io/posts/sky-t1.", + "Qwen Team. 2025b. QwQ-32b: Embracing the power of reinforcement learning. https://qwenlm.github. b.io/blog/qwq-32b/.", + "Tencent. 2025. Reasoning efficiency redefined! meet Tencent's 'hunyuan-t1'—the first mamba-powered ultra-large model. https://llm.hunyuan.tencent.com/#/Blog/hy-t1/." + ], + "bbox": [ + 510, + 85, + 880, + 919 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 489, + 942, + 507, + 953 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. 2024. Dynamic self-consistency: Leveraging reasoning paths for efficient llm sampling. arXiv preprint arXiv:2408.17017.", + "Junlin Wang, Shang Zhu, Jon Saad-Falcon, Ben Athiwaratkun, Qingyang Wu, Jue Wang, Shuaiwen Leon Song, Ce Zhang, Bhuwan Dhingra, and James Zou. 2025a. Think deep, think fast: Investigating efficiency of verifier-free inference-time-scaling methods. 
arXiv preprint arXiv:2504.14047.", + "Xinglin Wang, Shaoxiong Feng, Yiwei Li, Peiwen Yuan, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, and Kan Li. 2024. Make every penny count: Difficulty-adaptive self-consistency for cost-efficient reasoning. arXiv preprint arXiv:2408.13457.", + "Yiming Wang, Pei Zhang, Siyuan Huang, Baosong Yang, Zhuosheng Zhang, Fei Huang, and Rui Wang. 2025b. Sampling-efficient test-time scaling: Self-estimating the best-of-n sampling in early decoding. arXiv preprint arXiv:2503.01422.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.", + "Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. Light-R1: Curriculum sft, dpo and rl for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460.", + "Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. 2025. Tokenskip: Controllable chain-of-thought compression in llms. arXiv preprint arXiv:2502.12067.", + "Jingxian Xu, Mengyu Zhou, Weichang Liu, Hanbing Liu, Shi Han, and Dongmei Zhang. 2025a. Twt: Thinking without tokens by habitual reasoning distillation with multi-teachers' guidance. arXiv preprint arXiv:2503.24198.", + "Yuhui Xu, Hanze Dong, Lei Wang, Doyen Sahoo, Junnan Li, and Caiming Xiong. 2025b. Scalable chain of thoughts via elastic reasoning. arXiv preprint arXiv:2505.05315.", + "Yuchen Yan, Yongliang Shen, Yang Liu, Jin Jiang, Mengdi Zhang, Jian Shao, and Yueting Zhuang. 2025. Infty think: Breaking the length limits of long-context reasoning in large language models. arXiv preprint arXiv:2503.06692.", + "An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, and 1 others. 2025a. Qwen3 technical report. 
arXiv preprint arXiv:2505.09388." + ], + "bbox": [ + 115, + 85, + 485, + 917 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chenxu Yang, Qingyi Si, Yongjie Duan, Zheliang Zhu, Chenyu Zhu, Zheng Lin, Li Cao, and Weiping Wang. 2025b. Dynamic early exit in reasoning models. arXiv preprint arXiv:2504.15895.", + "Junjie Yang, Ke Lin, and Xing Yu. 2025c. Think when you need: Self-adaptive chain-of-thought learning. arXiv preprint arXiv:2504.03234.", + "Wang Yang, Xiang Yue, Vipin Chaudhary, and Xiaotian Han. 2025d. Speculative thinking: Enhancing small-model reasoning with large model guidance at inference time. arXiv preprint arXiv:2504.12329.", + "Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. 2025e. Towards thinking-optimal scaling of test-time compute for llm reasoning. arXiv preprint arXiv:2502.18080.", + "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. LIMO: Less is more for reasoning. arXiv preprint arXiv:2502.03387.", + "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373.", + "Bin Yu, Hang Yuan, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. 2025a. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. arXiv preprint arXiv:2505.03469.", + "Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. 2025b. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810.", + "Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. 2025a. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589.", + "Wenyuan Zhang, Tianyun Liu, Mengxiao Song, Xiaodong Li, and Tingwen Liu. 2025b. SOTOPIAΩ: Dynamic strategy injection learning and social instruction following evaluation for social agents. 
Preprint, arXiv:2502.15538.", + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. Judging LLM-as-a-judge with MT-bench and chatbot arena. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track.", + "Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. 2025. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470." + ], + "bbox": [ + 510, + 85, + 880, + 879 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 489, + 942, + 507, + 954 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A More Information of S1-Bench Construction", + "text_level": 1, + "bbox": [ + 114, + 83, + 418, + 115 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.1 Benchmark Statistics", + "text_level": 1, + "bbox": [ + 114, + 126, + 332, + 140 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We survey studies on improving the efficiency of LRMs, as there is potential overlap between these studies and the technical approaches aimed at enhancing system 1 thinking in LRMs. Table 7 presents the results of our survey. We compile the benchmarks used in these studies for evaluation, that are typically used to verify whether models achieve efficiency improvements. 
Benchmarks that appear more than four times include: MATH500 (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), AIME24/25 (MAA Committees), GPQA (Rein et al., 2024), AMC23 (AI-MO, 2024), MMLU (Hendrycks et al., 2021a), Olympiad-Bench (He et al., 2024), SVAMP (Patel et al., 2021), LiveCodeBench (Jain et al., 2025), and CommonSenseQA (Talmor et al., 2019).", + "bbox": [ + 112, + 148, + 489, + 404 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The accuracy shown in Table 1 is the average result of the four models, Qwen2.5-7B, Llama3.1-8B, Mistral-8B, and Gemma2-9B, at temperature 0, using GPT-4o as the evaluator.", + "bbox": [ + 112, + 406, + 489, + 470 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.2 Subcategories in S1-Bench", + "text_level": 1, + "bbox": [ + 114, + 482, + 371, + 498 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Figure 7 shows the pie chart distribution of 28 subcategories in S1-Bench. For more details on the subcategories, please refer to Table 8,9.", + "bbox": [ + 112, + 504, + 489, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.3 Prompt for S1-Bench construction", + "text_level": 1, + "bbox": [ + 114, + 564, + 433, + 579 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This section presents the prompts used in the construction of S1-Bench, including the Initial Generation prompt, the Discriminating Generation Quality prompt, and the Reduce Difficulty prompt. See Table 10 for details.", + "bbox": [ + 112, + 586, + 489, + 663 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B Baseline Models and Evaluation Details", + "text_level": 1, + "bbox": [ + 114, + 678, + 485, + 695 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.1 Baseline Model Details", + "text_level": 1, + "bbox": [ + 114, + 705, + 342, + 720 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 11 presents the abbreviations, IDs, and URLs of LLMs used in this paper. 
Table 12 displays the abbreviations, IDs, URLs, organizations, training algorithms, and training data volumes of open-source LRMs evaluated in this study.", + "bbox": [ + 112, + 726, + 489, + 806 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.2 GPT-4o and Human Evaluation", + "text_level": 1, + "bbox": [ + 114, + 819, + 413, + 834 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We use GPT-4o as the evaluator to assess the correctness of the responses. If a final answer can be isolated, only the final answer is evaluated; otherwise, the entire response is assessed. The evaluation prompt is provided in Table 13.", + "bbox": [ + 112, + 841, + 489, + 921 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/d9840947d8bbf7b1525dab3d32fa42e1b624558bb99254381f1ead27270614bf.jpg", + "image_caption": [ + "Figure 7: S1-Bench Category Display. The inner circle represents four major categories, and the outer circle includes 28 subcategories." + ], + "image_footnote": [], + "bbox": [ + 546, + 84, + 847, + 298 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To evaluate the consistency between the GPT-4 judge's assessments and human judgments, we conduct a comprehensive human evaluation study involving three of the authors. Specifically, we randomly sample 20 question-answer pairs from each model's greedy decoding results, resulting in a dataset of 640 pairs derived from 32 models (including 4 verifier LLMs and 28 LRMs). The questions, reference answers, and model responses are then presented to three annotators, who independently judge the correctness of each model response. The final human evaluation results are determined through majority voting. 
Ultimately, the Cohen's Kappa between the human evaluators and the GPT-4 judge is calculated to be 0.83, indicating an exceptionally high level of agreement.", + "bbox": [ + 507, + 379, + 884, + 636 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.3 Accuracy Metrics Details", + "text_level": 1, + "bbox": [ + 507, + 646, + 756, + 662 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Pass@1: Followed DeepSeek-R1 (Guo et al., 2025), we calculate pass@1 to assess the percentage of correct responses among the $k = 5$ generations. Specifically, it is defined as:", + "bbox": [ + 507, + 668, + 882, + 732 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\text {p a s s} @ 1 = \\frac {1}{k} \\sum_ {i = 1} ^ {k} p _ {i}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 741, + 882, + 785 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $p_i$ is the correctness of the i-th generation. Acc@k: Since S1-Bench is composed of extremely simple questions, we calculate acc@k. Specifically, acc@k=1 if all k responses are correct and acc@k = 0 otherwise. It is defined as:", + "bbox": [ + 507, + 794, + 882, + 872 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {a c c} @ \\mathrm {k} = \\prod_ {i = 1} ^ {k} p _ {i}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 631, + 881, + 882, + 923 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 489, + 941, + 507, + 954 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/63656eb5bbdbb3adfe97f4b0f590a44d26894e6a8e5d44117cfa384530768d08.jpg", + "image_caption": [ + "Figure 8: LRMs exhibit under-accuracy and overthinking on simple problems. Shapes represent organizations, colors represent base model families, with darker colors indicating larger models, and connecting lines represent the relationships between model families and training." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 80, + 884, + 372 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.4 Types and Analysis of Format Errors", + "text_level": 1, + "bbox": [ + 112, + 451, + 455, + 467 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This section introduces a comprehensive taxonomy of format errors and highlights the importance of addressing these issues in future research. Unlike conventional LLMs, LRMs frequently exhibit format errors. These errors are defined by failing to use a unique end thinking marker (e.g.,) to separate the thinking process from the final answer. Format errors increase the difficulty of distinguishing the thinking process from the final answer and reveal the vulnerability of LRMs in following predefined formats.", + "bbox": [ + 112, + 470, + 489, + 646 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To illustrate this phenomenon, we identify 12 distinct types of response formats produced by LRMs, each assigned a unique ID, as shown in Table 5. These 12 types are further grouped into three major categories:", + "bbox": [ + 112, + 648, + 489, + 728 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Standard-Conforming Responses: These responses meet the expected format by including exactly one end thinking marker (e.g., ) to delimit the thinking process from the final answer. Among these, type ID-100 includes a thinking process, while ID-101 omits it. The proportion of such responses is measured using the S-Corr metric.", + "- Unreadable Responses: These refer to generation failures, including cases where LRMs produce endlessly thinking content or solely" + ], + "bbox": [ + 134, + 734, + 489, + 921 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "produce end thinking markers. 
The proportion of all other (i.e., readable) responses is measured using the L-Corr metric.", + "bbox": [ + 544, + 451, + 884, + 499 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- Readable but Malformed Responses: These responses deviate from the standard format yet still contain extractable information. In some cases, the final answer is missing (e.g., ID-200, ID-202, ID-205), and we instead evaluate the correctness of the thinking process. In other cases, multiple (e.g., ID-201, ID-203) or unmatched9 (e.g., ID-204, ID-206) end thinking markers are generated. In such instances, we treat the content following the last end thinking marker as the final answer for evaluation.", + "bbox": [ + 531, + 511, + 884, + 703 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 14 and Table 15 present the distributions of 12 format types under top-p sampling and greedy sampling, respectively. we find: (1) The infinite generation phenomenon is widespread across most LRMs, particularly concentrated in LRMs with fewer than 32B parameters. (2) The Nemotron and EXAONE families frequently produce correctly formatted responses without any explicit thinking processes. This behavior can be viewed as a mechanism for mitigating over-thinking. However,", + "bbox": [ + 507, + 715, + 884, + 878 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "9This paper provides a reference collection of unmatched end thinking makers: $< /$ think>, $< /$ th think>, $< /$ reason>, \\nanswer\\n, \\*\\*Final Answer\\*\\* and \\*\\*答案\\*.", + "bbox": [ + 507, + 887, + 882, + 919 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 489, + 941, + 510, + 954 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "the EXAONE family still exhibits substantial overthinking tendencies, suggesting that LRMs' capability to respond without visible reasoning and their tendency to overthink may be orthogonal characteristics. 
(3) None of the evaluated LRMs exhibited behaviors classified as ID-205/206.", + "bbox": [ + 112, + 84, + 489, + 180 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/d1f06c5b686490da5727a7d2a7d29bd968a112ef4adbf0a757b669783d69a035.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
FormatIDmarker (standard)marker (unmatched)marker (number)thinking processfinal answer
Standard100-1
101-1×
Readable but Malformed200-1×
201->1
202->1×
203->1×
204×≥1
205×≥1×
206×≥1×
207××0-
Unreadable300≥1××
301××0-
", + "bbox": [ + 115, + 189, + 489, + 367 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C More Experimental Setups & Results", + "text_level": 1, + "bbox": [ + 112, + 423, + 473, + 439 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.1 Greedy Sampling Results", + "text_level": 1, + "bbox": [ + 112, + 449, + 363, + 464 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 16 presents the performance of LRMs on S1-Bench under greedy sampling. While overall accuracy improves compared to top-p sampling, issues of inefficiency and accuracy degradation on simple questions remain.", + "bbox": [ + 112, + 469, + 487, + 551 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.2 Efficiency Analysis across Subcategories.", + "text_level": 1, + "bbox": [ + 112, + 562, + 484, + 577 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 9 illustrates the average response tokens across the 28 subcategories. In the heatmap, both models (rows) and subcategories (columns) are ordered in descending order according to their average number of response tokens.", + "bbox": [ + 112, + 583, + 489, + 663 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.3 Solution Analysis Details", + "text_level": 1, + "bbox": [ + 112, + 675, + 359, + 690 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For solution analysis, We only use well-formatted thinking processes with correct final answers, as incorrect answers make it unclear whether LRMs are over-reasoning or under-reasoning, and malformed thinking processes cannot be precisely extracted. The segmentation process is performed by DeepSeek-v3, with prompts detailed in Table 17. 
We compute the average token count in the first solution round; if no solution is found, we use the token count of the entire thinking process.", + "bbox": [ + 112, + 695, + 489, + 857 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.4 Thinking Redundancy Analysis", + "text_level": 1, + "bbox": [ + 112, + 868, + 413, + 883 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We conduct a similarity analysis to analyze how information redundancy in the thinking processes", + "bbox": [ + 112, + 890, + 487, + 921 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "changes as reasoning sequences increase. Specifically, we first divide the complete thinking process into k equal-length segments10. Then, we encode each segment using the all-MiniLM-L6-v2 model11. For each segment, we calculate the cosine similarity with all its preceding segments and use the maximum similarity as a measure of its information redundancy. As shown in Figure 10, information redundancy increases across all four main categories as reasoning sequences increase. Sky-T1-32B shows overall lower similarity, which stems from its shorter thinking process, but still demonstrates an upward trend.", + "bbox": [ + 507, + 84, + 884, + 294 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.5 Error Analysis Details", + "text_level": 1, + "bbox": [ + 507, + 304, + 734, + 319 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In error analysis, we only use well-formatted samples, as malformed thinking processes cannot be precisely extracted. For samples with correct final answers, we categorize them based on whether the thinking process contains explicit incorrect conclusions in intermediate steps. For samples with incorrect final answers, we categorize them based on whether the correct answer is mentioned at least once during reasoning. 
We use DeepSeek-v3 for categorization, with prompts provided in Table 18.", + "bbox": [ + 507, + 324, + 884, + 485 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.6 Gut Moment Analysis Details", + "text_level": 1, + "bbox": [ + 507, + 495, + 791, + 511 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We prompt GPT-4o to classify the initial part of model responses (before the first '\\n\\ntypes based on its comment on difficulty: easy, neutral, difficult, and no comment. The prompts for english question can be seen in Table 19. For Chinese queries, we use the translated version of the prompt in Chinese. In Table 6, we show the most common sentence of all LRMs in each type of \"gut moment.\"", + "bbox": [ + 507, + 516, + 882, + 661 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/3619fa05d9596f3dfac74b2e6f30bf27955c5389fa40972a22f2528776849588.jpg", + "table_caption": [ + "Table 5: Twelve types of response format." + ], + "table_footnote": [], + "table_body": "
TypeSentenceCount
easy-zh这个问题看起来挺简单的308
easy-enthat seems straightforward36
difficult-zh这个问题看起来有点复杂308
difficult-enpercentages can sometimes be tricky7
neutral-zh这个问题看起来好像不难24
neutral-enHmm, interesting3
", + "bbox": [ + 514, + 671, + 877, + 769 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 6: The most common sentence in each type of \"gut moment.\"", + "bbox": [ + 507, + 778, + 882, + 808 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D Error Cases", + "text_level": 1, + "bbox": [ + 509, + 835, + 653, + 850 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This section presents several error cases observed in LRMs. See Tables 20, 21, 22, 23, 24, and 25.", + "bbox": [ + 507, + 860, + 880, + 891 + ], + "page_idx": 14 + }, + { + "type": "page_footnote", + "text": "10We set $k = 15$ , changing its value does not affect the conclusions. 11 https://huggingface.co/sentence-transformers/allMiniLM-L6-v2", + "bbox": [ + 522, + 896, + 880, + 921 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 489, + 941, + 507, + 954 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/2f0be290628cb975dda0a9c4b4129748f34ac5ba492c7195d0629498b7739b93.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Paper AbbreviationMATHGSM8KAIMEGPQAAMCMMLUOlympiad-BenchSVAMPLiveCode-BenchCommon-SenseQA
Codi (Shen et al., 2025b)
CISC (Taubenfeld et al., 2025)
CoT-Valve (Ma et al., 2025b)
Dast (Shen et al., 2025a)
ATM (Chen et al., 2024a)
DEER (Yang et al., 2025b)
DPTS (Ding et al., 2025)
Dynasor (Fu et al., 2024)
ESC (Li et al., 2024)
Hawkeye (She et al., 2025)
token complexity (Lee et al., 2025)
INFTYTHINK (Yan et al., 2025)
KIMI K1.5 (Team et al., 2025a)
L1 (Aggarwal and Welleck, 2025)
LightThinker (Zhang et al., 2025a)
LS-Mixture SFT (Yu et al., 2025a)
DSC (Wang et al., 2024)
O1-Pruner (Luo et al., 2025)
MRT (Qu et al., 2025b)
Self-Doubt (Fu et al., 2025)
RASC (Wan et al., 2024)
NoThinking (Ma et al., 2025a)
Retro-Search (Lu et al., 2025)
RSD (Liao et al., 2025)
ST-BoN (Wang et al., 2025b)
Elastic Reasoning (Xu et al., 2025b)
FS-BoN (Munkhbat et al., 2025)
SoT (Aytes et al., 2025)
SpecReason (Pan et al., 2025)
Speculative Thinking (Yang et al., 2025d)
SPIRIT (Cui et al., 2025)
ITC Analysis (Wang et al., 2025a)
Think when needed (Yang et al., 2025c)
THINKPRUNE (Hou et al., 2025)
TALE (Han et al., 2024)
TokenSkip (Xia et al., 2025)
TOPS (Yang et al., 2025e)
efficient reasoning (Arora and Zanette, 2025)
TWT (Xu et al., 2025a)
Z1 (Yu et al., 2025b)
Count28242011866555
", + "bbox": [ + 117, + 218, + 878, + 740 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 7: A total of 40 studies on LRM efficiency before May 2025 were included. Benchmarks that appeared more than four times are listed.", + "bbox": [ + 112, + 749, + 882, + 778 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 489, + 941, + 509, + 954 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/832fc96c01a1dfa5c068c72fb411254118d8eaa8000d44b0e31f4b129b28d3d1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
cate.subcategoriesExplanation and cases
reasoning questionnumerical reasoningQuestions that require performing basic mathematical operations or solving simple algebraic equations to arrive at a numerical answer.\nCase: What's two plus three?
code reasoningQuestions that require tracing through and executing simple code snippets to determine their output or behavior when run in a specific programming environment.\nCase: What is the output of the following code when run in Python 3 environment: word = "hello"\\nprint(len(word))
set reasoningQuestions that require applying simple syllogistic reasoning to determine whether elements belong to sets based on clearly stated relationships.\nCase: All squares are quadrilaterals. A shape is a square, is it a quadrilateral?
temporal reasoningQuestions that require calculating time durations, ages, or future dates by applying simple arithmetic operations to temporal information.\nCase: How many minutes equal 120 seconds?
spatial reasoningQuestions that require determining relative positions, directions, or orientations of objects in space based on simple spatial relationships.\nCase: If a bird is flying above a tree, where is the tree in relation to the bird?
causal reasoningQuestions that require determining outcomes by applying simple cause-and-effect relationships based on given conditional statements.\nCase: If ferromagnetic material is placed in a magnetic field, it will become magnetized. An iron nail was placed next to a strong magnet for some time. Has the nail been magnetized?
natural law reasoningQuestions that require applying basic knowledge of physical laws and natural phenomena to predict simple observable outcomes in everyday scenarios.\nCase: Which is faster, an airplane or the propagation of light?
knowledge questiongeometry factsQuestions that require recalling simple and fundamental geometric properties about shapes, angles, and basic geometric figures.\nCase: How many angles does a trapezoid have?
geographic factsQuestions that require recalling simple factual information about locations, landmarks, political divisions, celestial bodies, and other basic geographic knowledge.\nCase: Which is the largest continent on Earth?
historical factsQuestions that require recalling basic facts about historical events.\nCase: Which country first invented paper?
biographical factsQuestions that require recalling basic facts about the identities, achievements, and characteristics of historical figures.\nCase: Who proposed the theory of universal gravitation?
measurement unitsQuestions that require recalling simple conversion relationships between standard units of measurement.\nCase: How many centimeters equal 1 meter?
scientific notationQuestions that require recalling basic scientific symbols, formulas, and standard units used in scientific communication.\nCase: What is the chemical symbol for oxygen?
creative authorshipQuestions that require recalling the creators or originators of notable artistic, literary, musical, and cultural works.\nCase: Who is the author of Hamlet?
", + "bbox": [ + 129, + 171, + 863, + 803 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 8: The subcategory descriptions and cases of reasoning questions and knowledge questions.", + "bbox": [ + 166, + 813, + 826, + 828 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 489, + 941, + 509, + 953 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/1ca33c1ae16b6590af6a367ff5a0eae2b58a329182ae7f749aecd0ce9bf5cb6f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
cate.subcategoriesExplanation and cases
instruction followingrepetition constraintsQuestions that require outputting specified characters, words, or phrases a specific number of times according to simple formatting instructions.\nCase: Output the number "7" four times, without using separators.
length constraintsQuestions that require generating outputs of a specific length or with a specific number of components based on simple counting constraints.\nCase: Output a four-digit number.
character constraintsQuestions that require generating words or numbers that conform to simple specified character patterns or formatting rules.\nCase: Output a number that begins with 8.
counting constraintsQuestions that require counting specific characters or elements within a given text or sequence.\nCase: Output the number of letter "y" in the word "yes".
transformation constraintsQuestions that require modifying text or numbers according to simple formatting or character substitution rules to produce a transformed output.\nCase: Output the word "good" with all letters capitalized directly.
sentence constraintsQuestions that require generating sentences that conform to simple specified content or structural requirements.\nCase: Give a sentence that contains the phrase "have lunch" directly.
analysis questionsentiment classificationQuestions that require determining whether simple statements express positive or negative emotions based on the tone and word choice.\nCase: Does the sentence "I hate rainy days." express a positive or negative emotion?
named entity recognitionQuestions that require identifying the correct category of named entities (such as people, places, organizations, or time expressions) within simple sentences.\nCase: In the sentence "Napoleon died in 1821", is "1821" a time or a place name?
language classificationQuestions that require identifying the language of origin for simple words or phrases based on their characteristic writing systems or common vocabulary.\nCase: Is the word "hello" English or Japanese?
topic classificationQuestions that require identifying the primary subject matter or thematic category of simple sentences based on their content and context clues.\nCase: Is the topic of the sentence "The stock market rose 2% today" finance or technology?
intent recognitionQuestions that require determining the communicative purpose behind simple utterances or statements based on their phrasing and context.\nCase: Is the intention of the sentence "I'm sorry I'm late." to apologize or to blame?
syntax classificationQuestions that require identifying the correct grammatical structure or sentence type of simple expressions based on their form, punctuation, and communicative function.\nCase: Is "Close the door!" an imperative sentence or an interrogative sentence?
grammar classificationQuestions that require identifying simple grammatical properties (like tense, voice, or polarity) of sentences based on their structure and verb forms.\nCase: Is "The apple was eaten." in active voice or passive voice?
coreference resolutionQuestions that require identifying which entity a pronoun or reference term refers to in simple sentences by tracking relationships between words in the text.\nCase: In "My computer is broken, and I need to fix it." What does "it" refer to?
", + "bbox": [ + 115, + 177, + 878, + 797 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 9: The subcategory descriptions and cases of instruction following questions and analysis questions.", + "bbox": [ + 139, + 807, + 855, + 822 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 489, + 941, + 509, + 954 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Prompt for construction workflow for S1-Bench", + "text_level": 1, + "bbox": [ + 115, + 162, + 410, + 175 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Data Generation Prompt", + "text_level": 1, + "bbox": [ + 115, + 179, + 265, + 192 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Generate 50 pairs of questions and answers in both Chinese and English based on the category's name, definition, and specific simplicity criteria. The following conditions must be satisfied:", + "bbox": [ + 115, + 195, + 882, + 218 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps.", + "2. Answers must be unique or easily falsifiable, with no possibility of multiple correct answers.", + "3. Make the questions as diverse as possible." 
+ ], + "bbox": [ + 115, + 217, + 670, + 249 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Category Name and Definition: {name_and_defined}", + "bbox": [ + 115, + 253, + 309, + 274 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Specific Simplicity Criteria: {criteria}", + "bbox": [ + 115, + 279, + 289, + 300 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Cases: \n## English question: {question_en} \n## English Answer: {answer_en}", + "bbox": [ + 115, + 305, + 235, + 357 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "```c\n## Chinese question:\n{question_zh}\n## Chinese Answer:\n{answer_zh}", + "bbox": [ + 115, + 361, + 236, + 403 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Please generate 50 pairs of Chinese and English questions and answers in the following format: [question]English-question[answer]English-answer[question]Chinese-question[answer]Chinese-answer...", + "bbox": [ + 115, + 407, + 717, + 429 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Start generating:", + "bbox": [ + 115, + 432, + 213, + 445 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Quality Discrimination Prompt", + "text_level": 1, + "bbox": [ + 115, + 450, + 302, + 462 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Given a question, its answer, and its category, please analyze from the following perspectives as comprehensively as possible: 1. Whether the question belongs to the specified category and meet the Specific Simplicity Criteria.", + "2. Whether the question is easy, clear, unambiguous, and has an absolutely unique answer.", + "3. Whether the answer is absolutely correct; if not, what the correct answer should be.", + "4. Whether the question is similar to other given questions, and if similar, whether more diverse questions can be generated." 
+ ], + "bbox": [ + 115, + 466, + 833, + 519 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Category Name and Definition: {name_and_defined}", + "bbox": [ + 115, + 523, + 309, + 544 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Specific Simplicity Criteria: {criteria}", + "bbox": [ + 115, + 549, + 289, + 571 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Question and Answer: {question_with_answer}", + "bbox": [ + 115, + 575, + 258, + 596 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Other Questions: {questions_list}", + "bbox": [ + 115, + 601, + 226, + 621 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Begin your analysis, aiming to be as detailed and comprehensive as possible:", + "bbox": [ + 115, + 626, + 556, + 639 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Difficulty Reduction Prompt", + "text_level": 1, + "bbox": [ + 115, + 643, + 285, + 656 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Given a question and answer that are too complex for the model to answer correctly, you need to further reduce their difficulty while trying to:", + "- Ensure the question aligns with the Category Name and Definition.", + "- Ensure the question meets the Specific Simplicity Criteria.", + "Category Name and Definition: {name_and_defined}", + "Specific Simplicity Criteria: {criteria}", + "Question and Answer: {question_with_answer}", + "The new question and answer:" + ], + "bbox": [ + 115, + 659, + 880, + 796 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 10: \"Category Name and Definition\" refers to the subcategory name and its definition, while Specific Simplicity Criteria refers to the simplicity requirements specific to the main category.", + "bbox": [ + 112, + 810, + 880, + 839 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 489, + 941, + 509, + 954 + ], + "page_idx": 18 + }, + { + "type": 
"table", + "img_path": "images/ce7830741f441d5ce070ca571bc7c8b76adf816c5b619ffa656b9c0daef6fea3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelModel IDURL
Qwen2.5-7BQwen2.5-7B-Instructhttps://huggingface.co/Qwen/Qwen2.5-7B-Instruct
Llama3.1-8BLlama-3.1-8B-Instructhttps://huggingface.co/meta-llama/Llama-3.1-8B-Instruct
Mistral-8BMinistral-8B-Instruct-2410https://huggingface.co/mistralai/Ministral-8B-Instruct-2410
Gemma2-9Bgemma-2-9b-ithttps://huggingface.co/google/gemma-2-9b-it
Qwen2.5-14BQwen2.5-14B-Instructhttps://huggingface.co/Qwen/Qwen2.5-14B-Instruct
Qwen2.5-32BQwen2.5-32B-Instructhttps://huggingface.co/Qwen/Qwen2.5-32B-Instruct
Qwen2.5-72BQwen2.5-72B-Instructhttps://huggingface.co/Qwen/Qwen2.5-72B-Instruct
Llama3.3-70BLlama-3.3-70B-Instructhttps://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
DeepSeek-v3DeepSeek-V3-0324https://huggingface.co/deepseek-ai/DeepSeek-V3-0324
", + "bbox": [ + 178, + 140, + 821, + 266 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/1a981bd1901d5cbde725a3f1aa0862ca5bcdcf4e957d236157126d10111daeb5.jpg", + "table_caption": [ + "Table 11: Mapping of LLM abbreviations and IDs used in this paper, with their open-source URLs." + ], + "table_footnote": [], + "table_body": "
Model IDAbbreviationBase ModelAlg.Size
DeepSeek
DeepSeek-R1-Distill-Qwen-1.5BDS-R1-1.5BQwen2.5-Math-1.5BSFT800K
DeepSeek-R1-Distill-Qwen-7BDS-R1-7BQwen2.5-Math-7BSFT800K
DeepSeek-R1-Distill-Llama-8BDS-R1-8BLlama-3.1-8BSFT800K
DeepSeek-R1-Distill-Qwen-14BDS-R1-14BQwen2.5-14BSFT800K
DeepSeek-R1-Distill-Qwen-32BDS-R1-32BQwen2.5-32BSFT800K
DeepSeek-R1-Distill-Llama-70BDS-R1-70BLlama-3.3-70B-InstructSFT800K
DeepSeek-R1DS-R1DeepSeek-V3-0324SFT&RL800K&-
Qwen
QwQ-32BQwQ-32BQwen2.5-32B--
Qwen3-235B-A22BQwen3-A22BQwen3-235B-A22B-BaseSFT&RL-&-
Qwen3-30B-A3BQwen3-A3BQwen3-30B-A3B-BaseSFT&RL-&-
Qwen3-32BQwen3-32BQwen3-32B-BaseSFT&RL-&-
Qwen3-14BQwen3-14BQwen3-14B-BaseSFT&RL-&-
Qwen3-8BQwen3-8BQwen3-8B-BaseSFT&RL-&-
Qwen3-1.7BQwen3-1.7BQwen3-1.7B-BaseSFT&RL-&-
qihoo360
Light-R1-7B-DSL-R1-7B-DSDeepSeek-R1-Distill-Qwen-7BSFT3K
Light-R1-14B-DSL-R1-14B-DSDeepSeek-R1-Distill-Qwen-14BSFT&RL3K&-
Light-R1-32B-DSL-R1-32B-DSDeepSeek-R1-Distill-Qwen-32BSFT3K
Light-R1-32BL-R1-32BQwen2.5-32B-InstructSFT&DPO73K&-
simplescaling
s1.1-7Bs1.1-7BQwen2.5-7B-InstructSFT1K
s1.1-14Bs1.1-14BQwen2.5-14B-InstructSFT1K
s1.1-32Bs1.1-32BQwen2.5-32B-InstructSFT1K
LG AI Research
EXAONE-Deep-2.4BEXAONE-2.4BEXAONE-3.5-2.4B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-7.8BEXAONE-7.8BEXAONE-3.5-7.8B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-32BEXAONE-32BEXAONE-3.5-32B-InstructSFT&DPO&RL1.6M&20K&10K
NVIDIA
Llama-3.1-Nemotron-Nano-8B-v1Nemotron-8BLlama-3.1-8B-InstructSFT&RL-&-
Llama-3.3-Nemotron-Super-49B-v1Nemotron-49BLlama-3.3-70B-InstructSFT&RL-&-
NovaSky
Sky-T1-32B-FlashSky-T1-32BQwen2.5-32B-InstructSFT&SimPO17K&10K
", + "bbox": [ + 132, + 416, + 863, + 834 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 12: The open-source LRMs details evaluated for S1-Bench.", + "bbox": [ + 275, + 843, + 719, + 857 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 487, + 941, + 509, + 954 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Prompt for Correctness Evaluation", + "text_level": 1, + "bbox": [ + 115, + 117, + 332, + 131 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Evaluation on S1-Bench", + "text_level": 1, + "bbox": [ + 115, + 140, + 263, + 152 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "**Question:**", + "bbox": [ + 115, + 162, + 203, + 174 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "{question}", + "bbox": [ + 115, + 177, + 179, + 191 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "**Model Answer:**", + "bbox": [ + 115, + 202, + 236, + 212 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "{model_answer}", + "bbox": [ + 115, + 217, + 213, + 229 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "**Ground Truth:**", + "bbox": [ + 115, + 241, + 228, + 253 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "{groundtruth}", + "bbox": [ + 115, + 256, + 205, + 269 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Your task is to evaluate whether the model's answer is correct.", + "bbox": [ + 115, + 281, + 473, + 293 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "An answer is considered correct as long as it contains the ground truth (regardless of how complex or detailed the description is).", + "bbox": [ + 115, + 296, + 853, + 307 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "If there are parenthetical notes after the ground truth, then there may be multiple correct answers. 
In this case, the given answer is just one example, and any answer that meets the requirements specified in the notes can be considered correct.", + "bbox": [ + 114, + 311, + 880, + 332 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Additionally, some reasonably uncertain supplementary information is also considered appropriate, including more details, possibilities, and expanded discussion. You should focus more on whether the reply contains the correct answer.", + "bbox": [ + 114, + 336, + 880, + 359 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "You need to output a standard JSON, providing your explanation of the evaluation in the \"explain\" field, and giving the evaluation result in the \"result\" field, where 1 means the answer is correct and 0 means it is incorrect.", + "bbox": [ + 112, + 370, + 880, + 392 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Your action should follow the given format: \"explain\": \"\", \"result\": 0/1", + "bbox": [ + 115, + 395, + 519, + 407 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/e102964c50a709822136bdbfe994f39d60087b6ab50cc89332a3509b36706e08.jpg", + "table_caption": [ + "Table 13: Prompt for Correctness Evaluation." + ], + "table_footnote": [], + "table_body": "
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.910.000.000.000.000.000.000.000.000.000.000.09
Qwen3-14B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-8B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-1.7B99.810.000.000.000.000.000.000.000.000.000.000.19
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B99.910.000.090.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B99.240.000.000.000.000.000.000.000.000.000.000.76
DS-R1-1.5B97.580.000.000.000.000.000.000.000.000.000.002.42
Sky-T1-32B95.260.000.620.090.190.000.280.000.003.030.000.52
Nemotron-49B66.0733.930.000.000.000.000.000.000.000.000.000.00
Nemotron-8B58.0626.260.000.000.000.090.000.000.0015.020.000.57
L-R1-32B95.070.000.000.000.000.000.810.000.003.030.001.09
L-R1-32B-DS99.810.000.000.000.000.000.000.000.000.000.000.19
L-R1-14B-DS99.190.000.000.000.000.000.000.000.000.000.000.81
L-R1-7B-DS99.670.000.050.050.000.000.000.000.000.000.000.24
s1.1-32B99.530.000.000.050.000.000.000.000.000.000.000.43
s1.1-14B97.390.000.000.140.000.000.240.000.000.000.002.23
s1.1-7B88.960.000.007.960.090.000.000.000.000.090.002.89
EXAONE-32B67.3932.420.000.000.000.000.000.000.000.000.000.19
EXAONE-7.8B65.8332.230.000.000.050.470.000.000.000.140.001.28
EXAONE-2.4B81.4215.830.000.090.000.050.000.000.000.050.002.56
", + "bbox": [ + 112, + 506, + 884, + 863 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 14: Format type rates under top-p sampling.", + "bbox": [ + 327, + 872, + 665, + 887 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 487, + 941, + 507, + 954 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/1f08c05ec1b6385422750fdb4fe94ea288c5aa32337b466c418dd9e331f629f3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.760.000.000.000.000.000.000.000.000.000.000.24
Qwen3-14B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-8B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-1.7B99.760.000.000.000.000.000.000.000.000.000.000.24
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B99.760.000.000.000.000.000.000.000.000.000.000.24
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B97.870.000.000.000.000.000.000.000.000.000.002.13
DS-R1-1.5B91.940.000.000.000.000.000.000.000.000.000.008.06
Sky-T1-32B99.290.000.000.000.000.000.000.000.000.470.000.24
Nemotron-49B60.9039.100.000.000.000.000.000.000.000.000.000.00
Nemotron-8B55.2126.780.000.000.000.000.000.000.0016.350.001.66
L-R1-32B85.550.240.000.240.710.240.950.000.006.642.612.84
L-R1-32B-DS99.290.000.000.000.000.000.000.000.000.000.000.71
L-R1-14B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
L-R1-7B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-32B98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-14B95.970.000.000.240.000.000.240.000.000.000.003.55
s1.1-7B87.910.000.006.640.000.000.000.000.000.000.005.45
EXAONE-32B65.8833.890.000.240.000.000.000.000.000.000.000.00
EXAONE-7.8B63.5133.650.000.000.000.240.000.000.000.240.002.37
EXAONE-2.4B78.9115.880.000.000.000.000.000.000.000.000.005.21
", + "bbox": [ + 114, + 307, + 882, + 664 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 15: Format type rates under greedy decoding setting.", + "bbox": [ + 297, + 675, + 697, + 689 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 487, + 941, + 509, + 954 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/82e56a71ee574ab5f5bddac61408570b4b3ccea7a66fd03d2bb855e1f7a38b1f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelSizeacc (Loose)acc (Strict)L-Corr ↑S-Corr ↑Tokens ↓
Qwen3-235B-A22B235B100.00100.00100.00100.00702.70
Qwen3-30B-A3B30B100.00100.00100.00100.00636.35
QwQ-32B32B100.00100.00100.00100.00750.41
Qwen3-32B32B99.7699.7699.7699.76673.62
Qwen3-14B14B99.7699.76100.00100.00597.06
Qwen3-8B8B99.7699.76100.00100.00649.45
Qwen3-1.7B1.7B99.5399.5399.7699.76579.01
Hunyuan-T1-100.00100.00100.00100.00541.09
DS-R1671B100.00100.00100.00100.00621.89
DS-R1-70B70B99.7699.76100.00100.00469.78
DS-R1-32B32B100.00100.00100.00100.00428.46
DS-R1-14B14B99.2999.2999.7699.76463.52
DS-R1-8B8B97.6397.3999.7699.53452.11
DS-R1-7B7B94.3194.3197.8797.87436.87
DS-R1-1.5B1.5B76.5476.5491.9491.94473.67
Sky-T1-32B32B99.5399.0599.7699.29157.12
Nemotron-49B49B99.5399.53100.00100.00337.94
Nemotron-8B8B84.6077.7398.3481.99446.62
L-R1-32B32B92.1885.7894.5585.78996.36
L-R1-32B-DS32B99.2999.2999.2999.29528.45
L-R1-14B-DS14B98.8298.8298.8298.82664.28
L-R1-7B-DS7B92.6592.6598.8298.82514.60
s1.1-32B32B98.8298.8298.8298.82983.38
s1.1-14B14B95.9795.5096.4595.97786.30
s1.1-7B7B94.3187.6894.5587.91630.52
EXAONE-32B32B97.6397.39100.0099.76746.89
EXAONE-7.8B7.8B86.7386.4997.6397.16947.92
EXAONE-2.4B2.4B72.9972.9994.7994.791394.72
", + "bbox": [ + 161, + 214, + 836, + 745 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Table 16: Main results in the greedy decoding setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst.", + "bbox": [ + 112, + 755, + 882, + 785 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 487, + 941, + 509, + 954 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/e791da1c4318e09e42d5cbf06c66bdd4c446799ecfda954d440191112ca62b1b.jpg", + "image_caption": [ + "Figure 9: Average response token counts on the 28 subcategories, which is the average result of five generations under top-p sampling." + ], + "image_footnote": [], + "bbox": [ + 147, + 103, + 848, + 627 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/78e8d2a639b51786cbf504b87b7b9eb4482dadf5ec8f3a32a8585fe87b9a4491.jpg", + "image_caption": [ + "Figure 10: Maximum similarity between each segment and all preceding segments for LRMs across four categories." + ], + "image_footnote": [], + "bbox": [ + 139, + 715, + 858, + 872 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 487, + 941, + 510, + 954 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Prompts for Solution Segmentation", + "text_level": 1, + "bbox": [ + 115, + 114, + 334, + 126 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Task Description:", + "bbox": [ + 115, + 131, + 228, + 143 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Your task is to segment the given Chain of Thought according to the following rules:", + "bbox": [ + 115, + 143, + 600, + 153 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "1. 
Segmentation positions:", + "bbox": [ + 115, + 153, + 268, + 162 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Please identify and extract all sub-reasoning processes from the Chain of Thought that meet the following condition: They explicitly arrive at a conclusion (including cases phrased as questions, e.g., \"right?\") that is directly consistent with the Ground Truth. Reasoning processes that only indirectly support the Ground Truth or result in partially aligned conclusions should be excluded.", + "(2) After clearly reaching the conclusion consistent with the Ground Truth, insert the segmentation marker to distinctly separate each qualifying sub-reasoning process." + ], + "bbox": [ + 115, + 162, + 882, + 212 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "2. Output Restriction:", + "bbox": [ + 115, + 212, + 243, + 223 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) You should only directly output the segmentation result without adding any additional supplements.", + "(2) Except for inserting the separator, you must not make any other modifications to the original Chain of Thought, not even minor character-level changes such as punctuation, spacing, or capitalization. In other words, after removing all separators, the output text must match the original Chain of Thought exactly." 
+ ], + "bbox": [ + 115, + 223, + 880, + 263 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Some cases:", + "bbox": [ + 115, + 269, + 200, + 279 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Case 1:", + "bbox": [ + 115, + 280, + 178, + 288 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 288, + 200, + 298 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "What is the value of $\\mathbf{x}$ if $\\mathbf{x}$ plus 3 equals 8?", + "bbox": [ + 115, + 298, + 361, + 310 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Ground Truth:", + "bbox": [ + 115, + 313, + 225, + 324 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "5", + "bbox": [ + 115, + 325, + 126, + 335 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Chain of Thought:", + "bbox": [ + 115, + 341, + 248, + 351 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Okay, so I need to figure out what x is if x plus 3 equals 8. The problem says \"x plus 3 equals 8,\" so in mathematical terms, that would be: $x + 3 = 8$", + "bbox": [ + 115, + 351, + 880, + 370 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Alright, now I need to solve for $x$ . From what I remember in algebra, the goal is to isolate $x$ on one side of the equation. That means I need to get rid of that \"+\"3\" that's being added to $x$ . To do that, I should perform the opposite operation. Since it's adding 3, the opposite would be subtracting 3.", + "bbox": [ + 115, + 370, + 880, + 401 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "So, if I subtract 3 from both sides, that should keep things equal. Let me try that: $x + 3 - 3 = 8 - 3$", + "bbox": [ + 115, + 401, + 672, + 412 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Simplifying both sides, on the left side, the $+3$ and $-3$ cancel each other out, leaving just $x$ . And on the right side, 8 minus 3 is 5. 
So that simplifies to: $x = 5$", + "bbox": [ + 115, + 412, + 880, + 431 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Wait, let me check if that makes sense. If I substitute $\\mathbf{x}$ back into the original equation, does it hold true? Let's see:", + "bbox": [ + 115, + 432, + 771, + 442 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Original equation: $x + 3 = 8$", + "bbox": [ + 115, + 442, + 278, + 451 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Substitute $x = 5: 5 + 3 = 8$", + "bbox": [ + 115, + 451, + 270, + 461 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "5 plus 3 is indeed 8, so that works out.", + "bbox": [ + 115, + 462, + 337, + 470 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. So that's consistent with the first method.", + "bbox": [ + 115, + 472, + 880, + 492 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Wait a second, what if the original problem was written differently? The problem says \"x plus 3 equals 8.\" In symbols, that's unambiguous: $x + 3 = 8$ . So no, I think that's right.", + "bbox": [ + 115, + 492, + 880, + 512 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "**Final Answer**", + "bbox": [ + 115, + 512, + 223, + 521 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The value of $x$ is $\\boxed{5}$ .", + "bbox": [ + 115, + 523, + 238, + 538 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 115, + 542, + 186, + 552 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Okay, so I need to figure out what x is if x plus 3 equals 8. 
The problem says \"x plus 3 equals 8,\" so in mathematical terms, that would be: $x + 3 = 8$", + "bbox": [ + 115, + 552, + 880, + 571 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Alright, now I need to solve for $x$ . From what I remember in algebra, the goal is to isolate $x$ on one side of the equation. That means I need to get rid of that \"+\"3\" that's being added to $x$ . To do that, I should perform the opposite operation. Since it's adding 3, the opposite would be subtracting 3.", + "bbox": [ + 115, + 571, + 880, + 602 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "So, if I subtract 3 from both sides, that should keep things equal. Let me try that: $x + 3 - 3 = 8 - 3$", + "bbox": [ + 115, + 602, + 672, + 612 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Simplifying both sides, on the left side, the $+3$ and $-3$ cancel each other out, leaving just x. And on the right side, 8 minus 3 is 5. So that simplifies to: $x = 5 < \\text{split}>$", + "bbox": [ + 115, + 612, + 880, + 633 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Wait, let me check if that makes sense. If I substitute $\\mathbf{x}$ back into the original equation, does it hold true? Let's see:", + "bbox": [ + 115, + 633, + 771, + 643 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Original equation: $x + 3 = 8$", + "bbox": [ + 115, + 643, + 278, + 653 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Substitute $x = 5: 5 + 3 = 8$", + "bbox": [ + 115, + 653, + 268, + 662 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "5 plus 3 is indeed 8, so that works out.", + "bbox": [ + 115, + 663, + 379, + 673 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. 
So that's consistent with the first method.", + "bbox": [ + 115, + 673, + 880, + 693 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Wait a second, what if the original problem was written differently? The problem says \"x plus 3 equals 8.\" In symbols, that's unambiguous: $x + 3 = 8$ . So no, I think that's right.", + "bbox": [ + 115, + 693, + 880, + 713 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "**Final Answer**", + "bbox": [ + 115, + 713, + 223, + 722 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The value of x is 5. ", + "bbox": [ + 115, + 722, + 282, + 739 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "...Other examples are omitted.)", + "bbox": [ + 115, + 743, + 299, + 753 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Eval Target:", + "bbox": [ + 115, + 759, + 196, + 769 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "## Question:", + "bbox": [ + 115, + 769, + 191, + 778 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "{question}", + "bbox": [ + 115, + 780, + 179, + 791 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Ground Truth:", + "bbox": [ + 115, + 795, + 218, + 804 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "{groundtruth}", + "bbox": [ + 115, + 804, + 205, + 816 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Chain of Thought:", + "bbox": [ + 115, + 821, + 240, + 832 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "{thinking_process}", + "bbox": [ + 115, + 832, + 226, + 843 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 115, + 847, + 179, + 858 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 17: Prompts for Solution Segmentation.", + "bbox": [ + 341, + 873, + 653, + 887 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 487, + 941, + 507, + 954 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Prompts for Error 
Analysis", + "text_level": 1, + "bbox": [ + 115, + 198, + 285, + 211 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Prompts for samples whose final answer is correct", + "text_level": 1, + "bbox": [ + 115, + 215, + 413, + 228 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Task Description:", + "bbox": [ + 115, + 231, + 230, + 244 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below.", + "bbox": [ + 115, + 243, + 865, + 265 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Categories:", + "bbox": [ + 115, + 268, + 194, + 280 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "1: The CoT ***includes explicit incorrect conclusions*** in intermediate reasoning steps.", + "bbox": [ + 115, + 280, + 631, + 290 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "2: The CoT ***doesn't include any explicit incorrect conclusion*** in intermediate reasoning steps.", + "bbox": [ + 115, + 290, + 690, + 300 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Output your evaluation in the following format:", + "bbox": [ + 115, + 304, + 389, + 317 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "TheReason:", + "bbox": [ + 115, + 319, + 196, + 331 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "[note: Conduct a step-by-step analysis, stating if and where explicit incorrect conclusions occur in the COT.]", + "bbox": [ + 115, + 331, + 736, + 341 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "ErrorType:", + "bbox": [ + 115, + 344, + 189, + 356 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "[note: Summarize each incorrect conclusion into a specific error type using a phrase of less than 5 words, such as factual inaccuracies, logical 
fallacies, comprehension mistakes, calculation errors, formatting issues, and so forth, to better conduct further evaluation and analysis. Directly output a Python list, where each element represents the error type of a specific incorrect conclusion in the CoT. If there are no incorrect conclusions, return an empty list.]", + "bbox": [ + 115, + 356, + 882, + 397 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "TheCategory:", + "bbox": [ + 115, + 400, + 206, + 412 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "[note: Provide your classification based on your analysis using only the number \"1\" or \"2\". Do not add any additional text.]", + "bbox": [ + 115, + 410, + 820, + 422 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 425, + 184, + 436 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "{question}", + "bbox": [ + 115, + 436, + 179, + 448 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Ground Truth:", + "bbox": [ + 115, + 451, + 211, + 461 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "{groundtruth}", + "bbox": [ + 115, + 462, + 200, + 474 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "COT:", + "bbox": [ + 115, + 476, + 159, + 487 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "{thinking_process}", + "bbox": [ + 115, + 487, + 226, + 499 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "TheReason:", + "bbox": [ + 115, + 502, + 196, + 514 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Prompts for samples whose final answer is incorrect", + "text_level": 1, + "bbox": [ + 115, + 520, + 423, + 532 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Task Description:", + "bbox": [ + 115, + 536, + 230, + 548 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. 
Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below.", + "bbox": [ + 115, + 548, + 865, + 568 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Categories:", + "bbox": [ + 115, + 573, + 194, + 583 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "1: Regardless of whether the CoT ultimately arrives at the correct final answer or not, ***the correct answer is explicitly mentioned at least once*** within the reasoning steps (even if it is not ultimately adopted).", + "bbox": [ + 115, + 583, + 880, + 604 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "2: ***The correct answer is never explicitly mentioned or referenced*** at any point within the reasoning steps.", + "bbox": [ + 115, + 604, + 757, + 615 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Output your evaluation in the following format:", + "bbox": [ + 115, + 618, + 389, + 631 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "TheReason:", + "bbox": [ + 115, + 634, + 196, + 646 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "[note: Conduct a step-by-step analysis, explicitly stating whether and where a correct answer is mentioned within the reasoning steps.]", + "bbox": [ + 115, + 645, + 880, + 656 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "TheCategory:", + "bbox": [ + 115, + 659, + 206, + 671 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "[note: Provide your classification based on your analysis using only the number \"1\" or \"2\". 
Do not add any additional text.]", + "bbox": [ + 115, + 670, + 820, + 681 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 684, + 184, + 696 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "{question}", + "bbox": [ + 115, + 696, + 179, + 707 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Ground Truth:", + "bbox": [ + 115, + 711, + 211, + 721 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "{answer}", + "bbox": [ + 115, + 721, + 169, + 732 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "COT:", + "bbox": [ + 115, + 736, + 159, + 746 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "{thinking_part}", + "bbox": [ + 115, + 747, + 206, + 758 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "TheReason:", + "bbox": [ + 115, + 762, + 196, + 772 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 18: Prompts for Error Analysis.", + "bbox": [ + 369, + 789, + 626, + 804 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 487, + 941, + 509, + 954 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Prompts for classify the \"gut moment\" for English questions", + "text_level": 1, + "bbox": [ + 115, + 93, + 485, + 105 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Overall Task Description", + "bbox": [ + 115, + 110, + 272, + 121 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "You will be given the beginning portion of a response written by a large language model when answering a question. Your task is to classify the response into one of the following four categories based on the initial comment about **the difficulty of the question**.", + "bbox": [ + 115, + 121, + 880, + 141 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Important: Only consider the $^{**}$ initial comment $^{**}$ on difficulty made in the response. 
If the model later changes its assessment, please ignore those later revisions—focus solely on the first difficulty-related comment.", + "bbox": [ + 115, + 141, + 880, + 161 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "You must assign the response to exactly one of the four categories below:", + "bbox": [ + 115, + 161, + 536, + 171 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Category 1: Initial comment indicates the question is easy. Initial comment includes phrases like \"simple,\" \"basic,\" \"straightforward,\" \"common,\" etc., clearly stating the question is easy.", + "bbox": [ + 115, + 171, + 882, + 191 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Category 2: Initial comment indicates the question is difficult: Initial comment includes phrases like \"a bit complex,\" \"somewhat difficult,\" \"challenging,\" \"tricky,\" etc., clearly stating the question is hard.", + "bbox": [ + 115, + 191, + 880, + 211 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Category 3: Initial comment is neutral or suggests moderate difficulty: Initial comment includes phrases like \"interesting,\" \"not hard,\" \"not complex,\" \"fun,\" etc., without a clear indication of difficulty level.", + "bbox": [ + 115, + 211, + 882, + 231 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Category 4: No comment on difficulty: The response contains no mention of difficulty at all.", + "bbox": [ + 115, + 231, + 645, + 242 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Please output your result in the following format:", + "bbox": [ + 115, + 247, + 400, + 256 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "[Explanation of classification][Category (just the number)][Short phrase containing the comment]", + "bbox": [ + 115, + 256, + 744, + 268 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "If it falls into Category 4 (no comment), write \"None\" for the [Short phrase containing the comment] 
part.", + "bbox": [ + 115, + 268, + 721, + 278 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Examples for each category will be shown below.", + "bbox": [ + 115, + 282, + 411, + 294 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "## Category 1 Examples:", + "bbox": [ + 115, + 298, + 262, + 310 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 315, + 173, + 324 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "What is two plus three?", + "bbox": [ + 115, + 325, + 253, + 336 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Response:", + "bbox": [ + 115, + 341, + 176, + 350 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Hmm, the user is asking what two plus three equals. This question seems very simple, though it might be a bit confusing for someone just starting to learn math. First, I need to figure out what the user's intent is. They might be testing my basic calculation ability or genuinely need help solving the problem.", + "bbox": [ + 115, + 351, + 880, + 382 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 115, + 387, + 161, + 397 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "The initial comment describes the question as \"very simple\"1This question seems very simple", + "bbox": [ + 115, + 397, + 722, + 409 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "...(Other examples are omitted.)", + "bbox": [ + 115, + 413, + 299, + 423 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "## Category 2 Examples:", + "bbox": [ + 115, + 428, + 262, + 439 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 445, + 173, + 454 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "What's the next floor after basement level two?", + "bbox": [ + 115, + 455, + 386, + 464 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Response:", + "bbox": [ + 115, + 
470, + 176, + 481 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Hmm, this question seems a bit complex, but I'll try to understand it first. The question is \"What's the next floor after basement level two?\" First, I need to clarify what \"basement level two\" means. Usually, the first basement is the deepest, and then going up you have ground floor, first floor, and so on.", + "bbox": [ + 115, + 481, + 880, + 512 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 115, + 517, + 161, + 527 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "The initial comment says it's \"a bit complex\" $<\\text{sep}>2$ $<\\text{sep}>$ This question seems a bit complex", + "bbox": [ + 115, + 527, + 648, + 538 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "...Other examples are omitted.)", + "bbox": [ + 115, + 542, + 299, + 552 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "## Category 3 Examples:", + "bbox": [ + 115, + 558, + 262, + 569 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 570, + 173, + 577 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "\"Doctors recommend drinking at least eight glasses of water a day.\" Is this sentence about health or education?", + "bbox": [ + 115, + 577, + 749, + 589 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Response:", + "bbox": [ + 115, + 594, + 176, + 604 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Hmm, this question seems not complicated, though if you think carefully it might be a bit tricky. 
First, I need to understand the meaning of the sentence \"Doctors recommend drinking at least eight glasses of water a day.\"", + "bbox": [ + 115, + 604, + 880, + 625 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 115, + 630, + 161, + 639 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "The initial comment says it's \"not complicated,\" even though it later mentions it might be tricky. Classification should follow the initial comment3This question doesn't seem complicated", + "bbox": [ + 115, + 640, + 880, + 661 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "...Other examples are omitted.)", + "bbox": [ + 115, + 665, + 299, + 676 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "## Category 4 Examples:", + "bbox": [ + 115, + 681, + 262, + 692 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 697, + 173, + 707 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "All birds have feathers. A sparrow is a bird. Does a sparrow have feathers?", + "bbox": [ + 115, + 708, + 544, + 719 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Response:", + "bbox": [ + 115, + 724, + 176, + 733 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Hmm, the user is asking a question about birds and feathers—specifically, that all birds have feathers, a sparrow is a bird, so does the sparrow have feathers? This looks like a logic reasoning question. 
The user might be learning the basics of logic or biology.", + "bbox": [ + 115, + 734, + 880, + 755 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 115, + 760, + 161, + 769 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Although the response mentions it's a \"logic reasoning question,\" it contains no evaluation of difficulty4None", + "bbox": [ + 115, + 770, + 811, + 781 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "...Other examples are omitted.)", + "bbox": [ + 115, + 785, + 299, + 796 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Here is the one you need to classify.", + "bbox": [ + 115, + 801, + 334, + 812 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 115, + 816, + 173, + 826 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "question", + "bbox": [ + 115, + 828, + 166, + 838 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Response:", + "bbox": [ + 115, + 843, + 176, + 852 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "response", + "bbox": [ + 115, + 854, + 166, + 864 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 115, + 869, + 161, + 879 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Table 19: Prompts for classifying the \"gut moment\" in English questions.", + "bbox": [ + 250, + 895, + 744, + 909 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 487, + 942, + 507, + 953 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Categories: instruction following Model: DeepSeek-R1-Distill-Qwen-7B", + "text_level": 1, + "bbox": [ + 115, + 361, + 564, + 375 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 115, + 385, + 176, + 395 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Output the number \"7\" four times, without using separators.", + "bbox": [ + 115, + 
399, + 458, + 411 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Ground Truth:", + "text_level": 1, + "bbox": [ + 115, + 413, + 208, + 424 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "7777", + "bbox": [ + 115, + 428, + 147, + 438 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Thinking process:", + "text_level": 1, + "bbox": [ + 115, + 442, + 225, + 454 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To solve the problem of outputting the number \"7\" four times without using separators, I need to find a way to repeat the digit 7 multiple times without relying on concatenation or other connecting characters.", + "bbox": [ + 115, + 456, + 880, + 478 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "One approach is to use mathematical operations that inherently produce the number 7. Addition is a simple and effective operation that can be used to achieve this. By adding 7 to itself multiple times, I can create the desired sequence of numbers.", + "bbox": [ + 115, + 478, + 880, + 498 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Specifically, adding 7 four times will give me $7 + 7 + 7 + 7$ , which equals 28. This method ensures that the number 7 is used exactly four times without the need for any separators.", + "bbox": [ + 115, + 498, + 880, + 518 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Therefore, the solution is to use the addition operation to repeat the number 7 four times and output the result.", + "bbox": [ + 115, + 518, + 742, + 529 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Final Answer:", + "text_level": 1, + "bbox": [ + 115, + 531, + 201, + 542 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To output the number \"7\" four times without using separators, you can utilize mathematical operations that inherently produce the number 7 multiple times. 
One straightforward way is to use addition:", + "bbox": [ + 115, + 546, + 880, + 567 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n[ 7 + 7 + 7 + 7 = 2 8 ]\n$$\n", + "text_format": "latex", + "bbox": [ + 115, + 569, + 231, + 581 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "This expression shows that the number \"7\" is used four times in a row without any separators, resulting in the total of 28.", + "bbox": [ + 115, + 583, + 806, + 595 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n[ \\overline {{7 + 7 + 7 + 7}} ]\n$$\n", + "text_format": "latex", + "bbox": [ + 115, + 598, + 218, + 614 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Table 20: Error Case for LRM.", + "bbox": [ + 391, + 629, + 603, + 640 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 487, + 941, + 507, + 954 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 115, + 170, + 176, + 181 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "输出一个动物的英文单词,并且只有三个字母", + "bbox": [ + 115, + 183, + 403, + 195 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Output an English word for an animal, and it must be only three letters long.", + "bbox": [ + 115, + 197, + 552, + 209 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Ground Truth:", + "text_level": 1, + "bbox": [ + 115, + 211, + 208, + 222 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "任何一个由三个字母组成的动物名称都可以(例如:cat)", + "bbox": [ + 115, + 224, + 468, + 236 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Any animal name consisting of three letters is acceptable (e.g., cat).", + "bbox": [ + 115, + 237, + 502, + 250 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Thinking process:", + "text_level": 1, + "bbox": [ + 115, + 252, + 225, + 263 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": 
"嗯,用户让我输出一个动物的英文单词,而且必须是三个字母的。首先,我得确定有哪些常见的动物是三个字母的。可能想到的有猫(Cat)、狗(Dog),不过这两个都是三个字母吗?Cat是C-A-T,确实是三个字母,Dog也是D-O-G,三个字母。那这两个应该都可以。不过用户可能需要更多的例子,或者只需要一个?题目里说的是“输出一个”,所以可能只需要一个。", + "bbox": [ + 115, + 266, + 880, + 307 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "不过等等,可能还有其他选项,比如Ant(蚂蚁)、Ape(猿)、Bat(蝙蝠)、Bear(熊)是四个字母,所以不行。Fish是四个字母,Fox是三个字母,F-O-X,对,Fox也是三个字母。还有Lion是四个字母,不行。Tiger也是五个字母。Rat是三个字母,R-A-T。还有Sheep是五个字母,Cow是三个字母,C-O-W。所以可能的选择还挺多的。", + "bbox": [ + 115, + 310, + 880, + 344 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "不过用户可能想要的是最常见的,比如Cat、Dog、Fox、Ant、Rat、Cow这些。需要确认这些单词是否都是正确的动物名称。比如Ant是蚂蚁,没错。Fox是狐狸,Rat是老鼠,Cow是奶牛。这些都是正确的。所以可能需要从这些里面选一个。", + "bbox": [ + 115, + 347, + 880, + 370 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "不过用户可能希望答案只有一个,所以可能需要随机选一个,或者选最典型的。比如Cat和Dog是最常见的宠物,可能更容易被想到。但Fox也是一个不错的选择,可能更有趣一些。或者像Ape,虽然也是三个字母,但可能不如其他常见。", + "bbox": [ + 115, + 372, + 880, + 395 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "另外,用户有没有可能有其他需求?比如是否区分大小写?题目里说是英文单词,通常首字母不大写,除非是专有名词。所以应该用小写,比如cat、dog。不过输出的时候可能需要大写首字母,但题目没有特别说明,可能保持小写即可。", + "bbox": [ + 115, + 397, + 873, + 419 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "再检查一下有没有拼写错误,比如Bat是B-A-T,没错。Rat是R-A-T,正确。Cow是C-O-W,没错。所以这些都可以作为候选答案。现在需要从中选一个作为回答。可能用户希望得到多个例子,但题目要求输出一个,所以可能需要选最典型的。比如Cat或者Dog,因为这两个是最常见的宠物,可能更容易被用户想到。不过也有可能用户希望有变化,比如Fox这样的动物,比较有趣。", + "bbox": [ + 115, + 420, + 880, + 464 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "或者,用户可能是在测试知识库,看看是否知道除了Cat和Dog之外的其他三个字母的动物。比如Ant,虽然蚂蚁很小,但确实是动物。或者Rat,虽然老鼠可能不太受欢迎,但确实属于动物。Cow是家畜,可能也是合理的选择。", + "bbox": [ + 115, + 466, + 880, + 489 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "所以,综合考虑,可能需要给出多个例子,但题目要求输出一个。这时候可能需要选择其中一个,比如Cat,或者Dog,或者Fox。例如,回答“Cat”或者“Dog”都是正确的。不过为了多样性,可能选Fox,因为不是所有人都会立刻想到它,而Cat和Dog比较基础。或者用户可能希望得到不同的答案,所以可以选其中一个,比如“Fox”作为回答。", + "bbox": [ + 115, + 489, + 872, + 525 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Well, the user asked me to output an English 
word for an animal, and it must be three letters long. First, I need to figure out which common animals have three letters. I can think of Cat and Dog, but are both of these three letters? Cat is C-A-T, and it is indeed three letters. Dog is D-O-G, also three letters. So both of these should be valid. But the user might need more examples, or just one? The task says \"output one,\" so it might only require one.", + "bbox": [ + 114, + 527, + 880, + 570 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "But wait, there could be other options, like Ant, Ape, Bat, and Bear, but Bear has four letters, so it doesn't count. Fish has four letters, too. Fox is three letters, F-O-X, yes, Fox is three letters. Lion has four letters, so that doesn't work. Tiger has five letters. Rat is three letters, R-A-T. Sheep has five letters. Cow is three letters, C-O-W. So there are quite a few possibilities.", + "bbox": [ + 114, + 570, + 880, + 600 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "However, the user might be looking for the most common ones, like Cat, Dog, Fox, Ant, Rat, and Cow. I need to check if these words are all correct animal names. For example, Ant is an ant, that's right. Fox is a fox, Rat is a rat, Cow is a cow. These are all correct. So we might need to choose one from these.", + "bbox": [ + 114, + 601, + 880, + 630 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "But the user may want only one answer, so I might need to pick randomly or choose the most typical one. For instance, Cat and Dog are the most common pets, so they might be easier to think of. But Fox is also a good choice and might be more interesting. Or something like Ape, though it's three letters, might not be as common as the others.", + "bbox": [ + 114, + 631, + 880, + 659 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Also, is there any other requirement from the user? For instance, should I differentiate between uppercase and lowercase? 
The task says it's an English word, and usually, the first letter isn't capitalized unless it's a proper noun. So, it should be in lowercase, like cat, dog. However, when outputting, the first letter might need to be capitalized, but the task doesn't specify this, so lowercase should be fine.", + "bbox": [ + 114, + 659, + 880, + 690 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Let's double-check for spelling errors: Bat is B-A-T, correct. Rat is R-A-T, correct. Cow is C-O-W, correct. So these can all be candidate answers. Now, I need to choose one as the response. The user might want several examples, but the task asks for only one, so I might need to pick the most typical one. For example, Cat or Dog, since these are the most common pets and might be easier for the user to think of. But Fox is also a good choice, as it's more interesting.", + "bbox": [ + 114, + 690, + 880, + 731 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Or the user might be testing the knowledge base to see if it knows other three-letter animals besides Cat and Dog. For example, Ant, although ants are small, they are indeed animals. Or Rat, although rats might not be as popular, they are still animals. Cow is livestock, so that could also be a reasonable choice.", + "bbox": [ + 114, + 732, + 880, + 760 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "So, taking everything into account, I might need to give multiple examples, but the task requires one. In that case, I could choose one, like Cat, or Dog, or Fox. For instance, answering \"Cat\" or \"Dog\" would both be correct. However, for diversity, I might pick Fox, because not everyone will immediately think of it, whereas Cat and Dog are more basic. 
Or the user might want a different answer, so I could choose one of them, like \"Fox.\"", + "bbox": [ + 114, + 762, + 880, + 801 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Final Answer:", + "text_level": 1, + "bbox": [ + 115, + 802, + 205, + 810 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "**Wolf**", + "bbox": [ + 115, + 815, + 174, + 825 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Table 21: Error Case for LRM.", + "bbox": [ + 391, + 843, + 601, + 856 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Categories: instruction following Model: hunyuan-T1", + "bbox": [ + 115, + 147, + 455, + 160 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 487, + 941, + 507, + 954 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B", + "bbox": [ + 115, + 127, + 559, + 141 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 115, + 151, + 176, + 162 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "In which city is the Sydney Opera House located?", + "bbox": [ + 115, + 165, + 403, + 177 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Ground Truth:", + "text_level": 1, + "bbox": [ + 115, + 179, + 208, + 190 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Sydney.", + "text_level": 1, + "bbox": [ + 115, + 193, + 161, + 206 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Thinking process:", + "text_level": 1, + "bbox": [ + 115, + 208, + 225, + 221 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Okay, so I need to figure out which city the Sydney Opera House is in. I know that Australia has a lot of cities, but I'm pretty sure the Opera House isn't in Sydney. Wait, isn't it in a different place? I think I've heard of a place called Petersham before, maybe that's where it's located. 
I remember seeing pictures of it in the news once, and they mentioned Petersham as the area where it's situated. But I'm not entirely sure if it's just a suburb or a major city. Maybe I should think about other major cities in Australia. There's Melbourne, which is known as the \"Melbourne\" city, and then there's Brisbane, which is more on the northern side. Then there's Sydney, which is the capital, but I don't think the Opera House is there. I think it's in a regional area instead of the capital city. So, putting it all together, I'm pretty confident that the Sydney Opera House is located in Petersham, which is a suburb in New South Wales, Australia. But I should double-check to make sure I'm not mixing up the names of places or cities. Maybe I can recall any famous landmarks in Petersham. I think there's a big shopping center there called the Petersham Shopping Centre, and I've heard that the Opera House is nearby. Yeah, that makes sense. So, I'm going to go with Petersham as the location.", + "bbox": [ + 112, + 223, + 882, + 325 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Final Answer:", + "text_level": 1, + "bbox": [ + 115, + 326, + 203, + 338 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The Sydney Opera House is located in Petersham, a suburb in New South Wales, Australia.", + "bbox": [ + 115, + 342, + 636, + 354 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Table 22: Error Case for LRM.", + "bbox": [ + 391, + 370, + 603, + 382 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B", + "text_level": 1, + "bbox": [ + 115, + 479, + 559, + 492 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 115, + 502, + 176, + 514 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Who composed *Symphony No. 9 in D minor, Op. 
125*?", + "bbox": [ + 115, + 516, + 448, + 527 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Ground Truth:", + "text_level": 1, + "bbox": [ + 115, + 530, + 208, + 542 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Ludwig van Beethoven", + "bbox": [ + 115, + 545, + 250, + 557 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Thinking process:", + "text_level": 1, + "bbox": [ + 115, + 558, + 225, + 571 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Alright, so I need to figure out who composed Symphony No. 9 in D minor, Op. 125. Let me start by recalling what I know about classical music composers. I know that there are several famous composers, but I'm trying to remember if it's someone like Beethoven, Mozart, or perhaps someone from a later era.", + "bbox": [ + 112, + 574, + 882, + 605 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Wait, Beethoven wrote many symphonies, but I think he didn't write Symphony No. 9 in D minor. Let me think. Beethoven's 9th symphony is actually in E flat minor, Op. 67, right? So that's not it. Then there's Beethoven's Moonlight Sonata, which is different.", + "bbox": [ + 112, + 605, + 880, + 626 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Mozart composed a lot of piano concertos, but I don't think he wrote a symphony in D minor. His compositions are usually in various key signatures, but D minor isn't one I associate with him. Then there's Schubert, who wrote the famous symphonies, but I don't recall him composing a symphony in D minor either.", + "bbox": [ + 112, + 626, + 880, + 656 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Wait, maybe it's someone else. I've heard of Igor Stravinsky before, but he was more known for his operas like The Rite of Spring. 
Then there's Paul Simon, but he's a contemporary composer, so probably not.", + "bbox": [ + 112, + 656, + 880, + 676 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Oh, maybe it's someone from the classical era. Let me think about other composers. There's Shostakovich, but I don't think he wrote that. What about someone like Tchaikovsky? No, he wrote symphonies in C minor and F major, but not D minor.", + "bbox": [ + 112, + 676, + 880, + 696 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Hmm, maybe it's someone more obscure. I think I've heard of a composer named Igor Kovalsky. Wait, was that the composer of this symphony? I'm not entirely sure, but I think he's associated with writing complex symphonies. Alternatively, could it be someone like Shoenfield? I'm not certain.", + "bbox": [ + 112, + 696, + 880, + 726 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Wait, I think I remember that Igor Kovalsky wrote this symphony. It's sometimes referred to as \"The New World\" because of its emotional depth and complexity. The symphony is in D minor, Op. 125, and it's known for its intense, dramatic sound. I think that's correct, but I should double-check to make sure it's not another composer.", + "bbox": [ + 112, + 726, + 880, + 756 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Another thought: maybe it's someone from the 19th century. I'm trying to recall if any composers from that era wrote such a symphony. I'm not sure, but I think Igor Kovalsky is the one. He was an American composer, so that makes sense as a modern touch in classical music.", + "bbox": [ + 112, + 756, + 880, + 785 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "So, putting it all together, I believe the composer is Igor Kovalsky, and the symphony is known for its dramatic and intense sound. 
I think that's the correct answer, but I should confirm if there's any other possibility I might be forgetting.", + "bbox": [ + 112, + 785, + 880, + 807 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Final Answer:", + "text_level": 1, + "bbox": [ + 115, + 809, + 205, + 821 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The composer of Symphony No. 9 in D minor, Op. 125, is Igor Kovalsky. This symphony is often recognized for its dramatic and intense sound, sometimes referred to as \"The New World.\"", + "bbox": [ + 112, + 824, + 880, + 845 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Table 23: Error Case for LRM.", + "bbox": [ + 389, + 862, + 601, + 875 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 487, + 941, + 509, + 954 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Categories: reasoning question Model: s1.1-7B", + "text_level": 1, + "bbox": [ + 115, + 96, + 413, + 110 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 115, + 120, + 176, + 131 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "1.5加3.5等于几?", + "bbox": [ + 115, + 134, + 218, + 145 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "What is 1.5 plus 3.5?", + "bbox": [ + 115, + 147, + 238, + 158 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Ground Truth:", + "text_level": 1, + "bbox": [ + 115, + 161, + 208, + 172 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "5", + "bbox": [ + 115, + 175, + 126, + 186 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Thinking process:", + "text_level": 1, + "bbox": [ + 115, + 190, + 223, + 200 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "好的,我需要计算1.5加3.5等于多少。首先,这两个数都是小数,处理起来相对简单。我可以在心里快速算一下。", + "bbox": [ + 115, + 203, + 815, + 215 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "首先,1.5加上3.5。可以将它们看成是 $1 + 0.5 + 3 + 0.5$ 
。这样就可以先将整数部分相加,再将小数部分相加。 $1 + 3 = 4$ , $0.5 + 0.5 = 1$ 。然后把两部分加起来是 $4 + 1 = 5$ 。所以答案是5。", + "bbox": [ + 115, + 216, + 878, + 237 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "另一种方法是将它们视为货币金额。比如,1.5美元加3.5美元。1.5美元加上3美元是4.5美元,再加上0.5美元就是5美元。这也得到同样的结果。", + "bbox": [ + 115, + 239, + 878, + 260 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "当然,也可以用电子计算器来验证,但我认为手动计算更快。不过,为了确保准确,我再检查一遍。1.5加3.5。10加35等于45,然后小数点一位,所以是4.5。对的, $1.5 + 3.5 = 4.5$ 。或者用竖式计算也可以:", + "bbox": [ + 115, + 261, + 878, + 282 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "1.5", + "bbox": [ + 115, + 285, + 136, + 294 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "+3.5", + "bbox": [ + 115, + 294, + 142, + 305 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "4.5", + "bbox": [ + 115, + 311, + 154, + 324 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "对,这样一看也确实是4.5。所以答案应该是4.5。我之前得到的结果是正确的。", + "bbox": [ + 115, + 325, + 601, + 335 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Alright, I need to calculate what 1.5 plus 3.5 equals. First, both of these numbers are decimals, so it's relatively simple to handle. I can quickly calculate it in my head.", + "bbox": [ + 115, + 337, + 880, + 357 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "First, 1.5 plus 3.5. You can think of them as $1 + 0.5 + 3 + 0.5$ . This way, I can add the integer parts first, then the decimal parts. $1 + 3 = 4$ , and $0.5 + 0.5 = 1$ . Then, adding the two parts together gives $4 + 1 = 5$ . So, the answer is 5.", + "bbox": [ + 115, + 359, + 880, + 378 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Another way is to treat them as currency amounts. For example, 1.5 dollars plus 3.5 dollars. 1.5 dollars plus 3 dollars equals 4.5 dollars, then adding 0.5 dollars gives 5 dollars. 
This also leads to the same result.", + "bbox": [ + 115, + 379, + 880, + 398 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Of course, you could use a calculator to verify, but I think doing it manually is quicker. However, to ensure accuracy, I will check again. 1.5 plus 3.5. 10 plus 35 equals 45, then move the decimal point one place, so it's 4.5. Yes, $1.5 + 3.5 = 4.5$ . Or you could use column addition:", + "bbox": [ + 115, + 399, + 880, + 428 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "1.5", + "bbox": [ + 115, + 429, + 141, + 438 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "+3.5", + "bbox": [ + 115, + 439, + 142, + 448 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 115, + 449, + 154, + 456 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "4.5", + "bbox": [ + 115, + 458, + 136, + 468 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Yes, looking at it this way, it indeed adds up to 4.5. So the answer should be 4.5. 
The result I got earlier is correct.", + "bbox": [ + 115, + 469, + 766, + 479 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Final Answer:", + "text_level": 1, + "bbox": [ + 115, + 481, + 203, + 492 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "答案是 4.5", + "bbox": [ + 115, + 495, + 193, + 510 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The answer is 4.5", + "bbox": [ + 115, + 514, + 230, + 529 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Table 24: Error Case for LRM.", + "bbox": [ + 391, + 544, + 603, + 556 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Categories: analysis question Model: DeepSeek-R1-Distill-Qwen-1.5B", + "text_level": 1, + "bbox": [ + 115, + 590, + 552, + 604 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 115, + 614, + 176, + 625 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In the sentence \"Napoleon died in 1821\", is \"1821\" a time or a place name?", + "bbox": [ + 115, + 627, + 547, + 640 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Ground Truth:", + "text_level": 1, + "bbox": [ + 115, + 642, + 208, + 653 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Time.", + "bbox": [ + 115, + 657, + 151, + 668 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Thinking process:", + "text_level": 1, + "bbox": [ + 115, + 671, + 223, + 683 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Okay, so I have this sentence: \"Napoleon died in 1821.\" I need to figure out whether \"1821\" is a time or a place name. Hmm, let me think about this step by step.", + "bbox": [ + 115, + 686, + 880, + 707 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "First, I know that \"Napoleon\" is a person, specifically a historical figure. So, if it's the name of a person, then \"1821\" must be related to their life. I'm pretty sure that \"1821\" is a year. 
I remember that Napoleon was born in 1769 and died in 1821. So, 1821 is definitely a year.", + "bbox": [ + 115, + 708, + 880, + 737 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Wait, but what if I'm not sure? Let me consider other possibilities. Sometimes names can be place names or time periods. For example, if someone's name were a city or a country, the number could be a place. But in this case, \"Napoleon\" is a person, so it's more likely a year.", + "bbox": [ + 115, + 737, + 880, + 766 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Also, thinking about the sentence structure, it's written as \"Napoleon died in 1821.\" The structure \"in\" suggests that 1821 is the place. So, in this context, \"in\" is an article that indicates the location, which would be a place name. That makes me think that \"1821\" is indeed a place name, the year.", + "bbox": [ + 115, + 766, + 880, + 797 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "I don't think it's a time because time is usually expressed as something like \"in the 19th century\" or \"2023.\" So, 1821 is specifically a year, not a time period. It's more precise because it's a specific year when Napoleon died.", + "bbox": [ + 115, + 797, + 880, + 816 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "So, putting it all together, \"Napoleon\" is a person, and \"in 1821\" is indicating the year he died. Therefore, \"1821\" is a place name, specifically a year.", + "bbox": [ + 115, + 818, + 880, + 838 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Final Answer:", + "text_level": 1, + "bbox": [ + 115, + 841, + 203, + 851 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In the sentence \"Napoleon died in 1821,\" the number \"1821\" is a place name, specifically a year. It is used to indicate the year Napoleon died, which is 1821. The use of \"in\" confirms that it refers to a place, not a time period. 
Therefore, \"1821\" is a year.", + "bbox": [ + 115, + 854, + 880, + 877 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Table 25: Error Case for LRM.", + "bbox": [ + 391, + 894, + 603, + 906 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 487, + 942, + 507, + 954 + ], + "page_idx": 30 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_model.json b/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_model.json new file mode 100644 index 0000000000000000000000000000000000000000..43c08f57440f6696765d7b5b686249989a3af047 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_model.json @@ -0,0 +1,6961 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.302, + 0.061, + 0.724 + ], + "angle": 270, + "content": "arXiv:2504.10368v3 [cs.CL] 27 May 2025" + }, + { + "type": "title", + "bbox": [ + 0.153, + 0.09, + 0.844, + 0.131 + ], + "angle": 0, + "content": "S1-Bench: A Simple Benchmark for Evaluating System 1 Thinking Capability of Large Reasoning Models" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.152, + 0.836, + 0.17 + ], + "angle": 0, + "content": "Wenyuan Zhang*, Shuaiyi Nie*, Xinghua Zhang, Zefeng Zhang, Tingwen Liu†" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.17, + 0.774, + 0.186 + ], + "angle": 0, + "content": "Institute of Information Engineering, Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.187, + 0.785, + 0.203 + ], + "angle": 0, + "content": "School of Cyber Security, University of Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.268, + 0.204, + 0.732, + 0.22 + ], + "angle": 0, + "content": "{zhangwenyuan,nieshuaiyi,liutingwen}@iei.ac.cn" + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.262, + 0.341, + 0.276 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.289, + 
0.46, + 0.63 + ], + "angle": 0, + "content": "We introduce S1-Bench, a novel benchmark designed to evaluate the performance of Large Reasoning Models (LRMs) on simple tasks that favor intuitive system 1 thinking rather than deliberative system 2 reasoning. While LRMs have achieved significant breakthroughs in complex reasoning tasks through explicit chains of thought, their heavy reliance on system 2 thinking may limit their system 1 thinking capabilities. However, there is a lack of an appropriate benchmark for evaluating LRM's system 1 thinking capabilities. To fill this gap, S1-Bench introduces a suite of simple, diverse, and natural questions across multiple domains and languages, specifically designed to assess LRMs' performance on questions more suitable for system 1. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe a gap between their difficulty perception and generation length. Overall, this work paves the way toward dual-system compatibility in the development of LRMs1." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.642, + 0.26, + 0.657 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.668, + 0.424, + 0.683 + ], + "angle": 0, + "content": "\"Simplicity is the ultimate sophistication.\"" + }, + { + "type": "text", + "bbox": [ + 0.325, + 0.683, + 0.487, + 0.696 + ], + "angle": 0, + "content": "— Leonardo da Vinci" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.703, + 0.49, + 0.879 + ], + "angle": 0, + "content": "Recent advances in Large Reasoning Models (LRMs), notably OpenAI's o1/o3 (OpenAI, 2024) and the DeepSeek-R1 (Guo et al., 2025) series, have propelled the development of Large Language Models (LLMs). 
Unlike traditional LLMs that exhibit intuitive, heuristic system 1 thinking, LRMs demonstrate deliberate and analytical system 2 reasoning (Qu et al., 2025a; Li et al., 2025b) by explicitly generating external chain-of-thought (COT) (Wei et al., 2022) before producing final answers. Through sophisticated strategies such as" + }, + { + "type": "table", + "bbox": [ + 0.525, + 0.26, + 0.869, + 0.432 + ], + "angle": 0, + "content": "
BenchmarkCross DomainRealistic ScenariosMulti-lingualAcc.
AIMEXX6.67
GPQAX24.94
Olympiad-Bench27.94
AMCXX31.88
MATHXX58.30
MMLUX66.27
GSM8KXX87.45
ASDIVXX97.51
GSM8K-zeroXXX77.98
RoR-BenchXXX14.24
S1-Bench (ours)100.00
" + }, + { + "type": "table_caption", + "bbox": [ + 0.509, + 0.442, + 0.884, + 0.485 + ], + "angle": 0, + "content": "Table 1: Characteristics of S1-Bench, with \"Acc.\" representing the average accuracy of four 7-9B LLMs. See Appendix A.1 for more details." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.517, + 0.884, + 0.597 + ], + "angle": 0, + "content": "self-reflection and multi-path exploration (Li et al., 2025a; Yeo et al., 2025), LRMs can achieve strong performance in tasks that require system 2 thinking, including advanced mathematical and competition-level problems (Yang et al., 2025a)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.6, + 0.885, + 0.922 + ], + "angle": 0, + "content": "However, there remains a lack of appropriate benchmarks for evaluating LRMs' system 1 thinking capabilities. Not all real-world problems require system 2 reasoning. The capacity to dynamically identify simple questions and address them efficiently contributes to both resource optimization and improved user satisfaction for LRMs. Nevertheless, current benchmarks either overemphasize difficulty, are simple yet lack domain diversity, or are only not hard for humans but involve unrealistic adversarial designs. Table 1 presents a collection of recent benchmarks aimed at mitigating the overthinking in LRMs (Sui et al., 2025). The majority of these benchmarks are of high difficulty. For example, AIME and GPQA (MAA Committees; Rein et al., 2024) achieve less than \\(30\\%\\) accuracy on conventional small LLMs, which are inherently more suitable for system 2 reasoning. Although some simple mathematical benchmarks are easy enough, such as GSM8K and ASDIV (Cobbe et al.," + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.887, + 0.488, + 0.921 + ], + "angle": 0, + "content": "* denotes equal contribution.† denotes corresponding author. \n1The code and benchmark can be found in https://github.com/WRipple/S1_Bench." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.943, + 0.504, + 0.955 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.261 + ], + "angle": 0, + "content": "2021; Miao et al., 2021), they often suffer from limited domain variety. Furthermore, some tasks that pose little challenge to humans but incorporate adversarial elements tend to lack relevance to realistic scenarios, such as GSM8K-zero (Chiang and Lee, 2024), which includes the correct answer in the questions. Thus, a benchmark to assess the system \\( l \\) thinking capability of LRMs is still lacking, further hindering our understanding of LRMs' cognitive flexibility between the two systems (Ziabari et al., 2025; Qu et al., 2025a)." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.263, + 0.49, + 0.568 + ], + "angle": 0, + "content": "To fill this research gap, we introduce the System 1 Thinking Capability Benchmark (S1-Bench), which measures the performance of LRMs across various simple tasks that commonly encountered in real-world applications. S1-Bench has the following three characteristics: (1) Simple. The questions are not hard for humans and can be easily answered by LLMs. LLMs with 7-9B parameters can robustly provide correct answers through direct responses when sampled across multiple temperatures. (2) Diverse. S1-Bench is not limited to simple reasoning problems; it encompasses four major categories and 28 subcategories in two languages (English and Chinese), including reasoning problems, commonsense knowledge, instruction following, and analytical problems. (3) Natural. The questions are clear, without any misleading elements or ambiguities, ensuring they can be answered intuitively." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.57, + 0.49, + 0.844 + ], + "angle": 0, + "content": "We conduct extensive evaluations on S1-Bench across 28 LRMs, yielding the following key findings: (1) Current LRMs exhibit inefficiency and lack system \\( I \\) thinking capabilities across all types of questions, with average output lengths 15.5 times longer than small LLMs on S1-Bench. (2) Despite employing deep reasoning, several LRMs exhibit under-accuracy and limited robustness on simple questions. (3) LRMs exhibit \"gut moment\" at the beginning of some reasoning processes, showing gut feelings about task difficulty. Yet, even when recognizing a question's simplicity, LRMs often fail to produce shorter responses—revealing a gap between their difficulty awareness and generation behavior. These findings emphasize the significant distance LRMs must traverse to become powerful dual-system compatible models." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.844, + 0.49, + 0.859 + ], + "angle": 0, + "content": "Our contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.874, + 0.49, + 0.922 + ], + "angle": 0, + "content": "- To the best of our knowledge, S1-Bench is the first benchmark to evaluate the system 1 thinking capabilities of LRMs, which paves" + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.085, + 0.835, + 0.101 + ], + "angle": 0, + "content": "the way for dual-system compatibility." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.102, + 0.882, + 0.132 + ], + "angle": 0, + "content": "- We introduce a workflow for constructing a simple dataset for system 1 evaluation." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.134, + 0.884, + 0.181 + ], + "angle": 0, + "content": "- Extensive experiments reveal the inefficiency, under-accuracy, and limited robustness of LRRMs on simple questions." 
+ }, + { + "type": "text", + "bbox": [ + 0.532, + 0.182, + 0.885, + 0.245 + ], + "angle": 0, + "content": "- We find that LRMs exhibit \"gut moment\" on simple problems, and reveal a gap between their difficulty perception and generation length." + }, + { + "type": "list", + "bbox": [ + 0.532, + 0.102, + 0.885, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.26, + 0.662, + 0.275 + ], + "angle": 0, + "content": "2 Related work" + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.287, + 0.753, + 0.302 + ], + "angle": 0, + "content": "2.1 Large Reasoning Models" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.308, + 0.885, + 0.614 + ], + "angle": 0, + "content": "Large Reasoning Models (LRMs), characterized by explicitly generating external thinking processes before final answers (Kumar et al., 2025b; Chen et al., 2025a), achieve a paradigm shift from intuitive system 1 thinking to deliberative system 2 reasoning compared to traditional LLMs (Li et al., 2025b; Qu et al., 2025a), thus achieving superior performance on complex tasks. The development of recent LRMs has largely followed two main approaches: large-scale reinforcement learning (RL) and model distillation. Models trained via largescale RL (Guo et al., 2025; Team, 2025b; Team et al., 2025b) leverage reward-based optimization to gradually incentivize deliberative reasoning. In contrast, distillation-based LRMs (OpenAI, 2024; Min et al., 2024; Team, 2025a; Ye et al., 2025; Muennighoff et al., 2025; Zhang et al., 2025b) acquire such abilities by transferring structured reasoning patterns from advanced teacher models." 
+ }, + { + "type": "title", + "bbox": [ + 0.51, + 0.627, + 0.722, + 0.641 + ], + "angle": 0, + "content": "2.2 Limitations of LRRMs" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.648, + 0.885, + 0.922 + ], + "angle": 0, + "content": "While LRMs have shown significant performance gains through deliberate reasoning, rigid adherence to this overly cautious thinking can introduce new limitations. On one hand, intermediate reasoning steps can cause excessive token generation and unnecessary solving attempts (Chen et al., 2024b; Hashemi et al., 2025; Kumar et al., 2025a), even leading to redundancy in the hidden layers (Chen et al., 2024c, 2025b). On the other hand, LRMs' performance can drop in specific contexts like safety scenarios (Jiang et al., 2025) and role-playing (Feng et al., 2025). However, prior studies mainly evaluated LRMs on complex tasks that more suited for deliberative system 2 thinking. Our work examines how deliberative reasoning impacts extremely simple problems better matched to intuition-driven system 1 processing." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.955 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.081, + 0.56, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.081, + 0.883, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.155, + 0.248, + 0.84, + 0.264 + ], + "angle": 0, + "content": "Figure 1: Construction workflow for S1-Bench and an illustrative example from each major category." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.287, + 0.232, + 0.303 + ], + "angle": 0, + "content": "3 S1-Bench" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.313, + 0.489, + 0.489 + ], + "angle": 0, + "content": "We introduce S1-Bench, a bilingual, multi-domain benchmark designed to evaluate system 1 thinking capability of LRM on extremely simple questions. 
These questions are easily solvable by traditional LLMs and not hard for humans. S1-Bench, which covers both English and Chinese, is organized into four major categories: reasoning (RSN), knowledge (KNO), instruction following (IF) and analysis (ANA), representing major dimensions commonly employed in LLM capability evaluation (Zheng et al., 2023; Chang et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.49, + 0.49, + 0.572 + ], + "angle": 0, + "content": "This section begins with how simplicity is ensured, then the detailed construction workflow for S1-Bench, and concludes with an overview of the dataset statistics. Figure 1 shows the construction workflow and an illustrative example per category." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.582, + 0.373, + 0.598 + ], + "angle": 0, + "content": "3.1 How to Ensure Simplicity?" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.603, + 0.49, + 0.636 + ], + "angle": 0, + "content": "We ensure questions are simple and suitable for system 1 thinking through the following two aspects." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.645, + 0.421, + 0.66 + ], + "angle": 0, + "content": "3.1.1 A Priori Simplicity Constraints" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.664, + 0.489, + 0.728 + ], + "angle": 0, + "content": "We begin by generating question-answer pairs through collaboration between humans and LLMs. Each pair is required to satisfy both the general and the category-specific simplicity criteria." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.729, + 0.489, + 0.808 + ], + "angle": 0, + "content": "The general simplicity criteria requires that: (1) Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps. (2) Answers must be unique or easily falsifiable (e.g., providing a three-letter English word)." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.81, + 0.49, + 0.922 + ], + "angle": 0, + "content": "The category-specific simplicity criteria are as follows. RSN: Limited to problems solvable with minimal reasoning or intuition. KNO: Restricted to common knowledge with unique, verifiable answers from sources like Wikipedia. IF: Involve straightforward instructions without strict formatting requirements. ANA: Limited to questions" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.288, + 0.885, + 0.352 + ], + "angle": 0, + "content": "whose answers can be directly inferred from the prompt, such as binary classification. These constraints ensure all questions remain straightforward for human respondents." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.364, + 0.845, + 0.379 + ], + "angle": 0, + "content": "3.1.2 A Posteriori Simplicity Verification" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.384, + 0.885, + 0.495 + ], + "angle": 0, + "content": "Due to the biases existing between language models and humans (Gallegos et al., 2024), questions that are simple for humans may be difficult for LLMs. Therefore, we introduce additional posteriori verification to ensure that questions are simple enough to be correctly and robustly answered by smaller LLMs from different families." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.51, + 0.742, + 0.524 + ], + "angle": 0, + "content": "3.2 Construction Workflow" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.532, + 0.884, + 0.659 + ], + "angle": 0, + "content": "Subcategory Preparation. To ensure diversity, we refer to the subcategories included in existing benchmarks (e.g., MMLU, IFEval, and GSM8K) and evaluation surveys (Chang et al., 2024) to select, merge, or design subcategories for S1-bench, ensuring that each meets the simplicity requirements. The definition and example question for each subcategory can be found in Appendix A.2." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.662, + 0.885, + 0.886 + ], + "angle": 0, + "content": "Implementation of A Priori Simplicity. First, we use two data generators² to create 100 initial bilingual question-answer pairs for each candidate subcategory. The data generation prompt (see Appdiix A.3) explicitly incorporates the subcategory definitions, along with both the general and category-specific simplicity criteria, while also aiming to ensure diversity in the generated questions. Second, these question-answer pairs are then independently evaluated by three annotators and two quality discriminators³ according to the general and category-specific simplicity criteria (see Appdiix A.3 for prompt of discriminators), resulting in five evaluation outcomes per pair. The three an" + }, + { + "type": "page_footnote", + "bbox": [ + 0.529, + 0.897, + 0.844, + 0.908 + ], + "angle": 0, + "content": "2We select Claude-3.7-Sonnet and Qwen2.5-72B-Instruct." + }, + { + "type": "page_footnote", + "bbox": [ + 0.531, + 0.909, + 0.783, + 0.92 + ], + "angle": 0, + "content": "3We select GPT-4o and DeepSeek-V3-241226." + }, + { + "type": "list", + "bbox": [ + 0.529, + 0.897, + 0.844, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.954 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.132, + 0.088, + 0.474, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.209, + 0.49, + 0.239 + ], + "angle": 0, + "content": "Figure 2: Statistical distribution of token counts for S1-Bench questions." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.264, + 0.489, + 0.359 + ], + "angle": 0, + "content": "notators are experienced graduate students familiar with LLMs and well-acquainted with the goals of S1-Bench. 
Finally, based on these evaluation outcomes, three annotators discuss and collectively decide whether to retain, modify, or discard each question." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.361, + 0.49, + 0.633 + ], + "angle": 0, + "content": "Implementation of A Posteriori Simplicity. First, each question obtained from the previous stage is input into the small LLM \\( \\text{validators}^4 \\) with 7~9 B parameters. For each question, we sample 10 answers at three different temperature settings (0, 0.2, and 0.4), resulting in a total of 30 responses per question. These responses are then individually evaluated for correctness using GPT-4o. Second, if all 30 sampled responses are correct, the question is accepted into S1-Bench. Otherwise, the question is returned to the generators, where a difficulty-reduction prompt (see Appendix 10) is applied to simplify it. The simplified questions then undergoes the same subsequent process. Finally, questions fail to meet the full-accuracy criterion (i.e., 30 out of 30 correct) after three rounds of difficulty reduction are excluded from the workflow." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.634, + 0.49, + 0.698 + ], + "angle": 0, + "content": "The final S1-Bench comprises questions that satisfy both human-based a priori simplicity constraints and LLM-based a posteriori simplicity verification." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.71, + 0.329, + 0.725 + ], + "angle": 0, + "content": "3.3 Benchmark Statistics" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.731, + 0.489, + 0.891 + ], + "angle": 0, + "content": "S1-Bench comprises 422 question-answer pairs across four major categories and 28 subcategories, balanced with 220 English and 202 Chinese questions. Figure 2 shows the token length distribution, with questions averaging 14.46 tokens. 
To ensure that the a posteriori verification process does not introduce simplicity only tailored to the small LLM validator, we evaluate S1-Bench on five additional LLMs and on Qwen3 Family with reasoning modes disabled. As shown, even the 1.7B model achieves" + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.082, + 0.883, + 0.267 + ], + "angle": 0, + "content": "
Modelt=0.0t=0.2t=0.4Tokens
Gemma2-9B100.00100.00100.0038.77
Llama3.1-8B100.00100.00100.0042.00
Mistral-8B100.00100.00100.0044.38
Qwen2.5-7B100.00100.00100.0042.81
DeepSeek-v3100.00100.00100.0079.53
Llama3.3-70B100.0099.7699.7653.71
Qwen2.5-14B99.7499.7699.7640.00
Qwen2.5-32B99.9899.9899.9843.17
Qwen2.5-72B100.00100.00100.0044.61
Qwen3-32B (w/o think)100.00100.00100.00103.30
Qwen3-14B (w/o think)100.00100.00100.0086.35
Qwen3-8B (w/o think)100.00100.0099.7690.54
Qwen3-1.7B (w/o think)98.1097.1695.73114.32
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.276, + 0.883, + 0.319 + ], + "angle": 0, + "content": "Table 2: Average accuracy (acc@k) and response token count of different LLMs, each sampled 10 times at three temperature settings." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.345, + 0.782, + 0.36 + ], + "angle": 0, + "content": "over \\(98\\%\\) accuracy at temperature 0." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.372, + 0.698, + 0.388 + ], + "angle": 0, + "content": "4 Main Experiment" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.398, + 0.839, + 0.414 + ], + "angle": 0, + "content": "4.1 Baseline Models and Configurations" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.418, + 0.886, + 0.725 + ], + "angle": 0, + "content": "We evaluated 28 different LRMs, which are explicitly trained to first respond with a thinking process, and then generate a final answer. These LRMs include open-source families, such as DeepSeek (Guo et al., 2025), Qwen (Yang et al., 2025a), Nemotron (Bercovich et al., 2025), LightR1 (Wen et al., 2025), s1.1 (Muennighoff et al., 2025), EXAONE (Research et al., 2025), and SkyT1 (Griggs et al., 2025), as well as closed-source Hunyuan-T1 (Tencent, 2025), spanning from tiny (1.5B) to large (671B) parameter sizes5. Notably, OpenAI's o-series models are not included as they do not disclose thinking processes to users. For each model, we consider two sets of generation configurations: Greedy sampling with temperature \\( t = 0 \\); Top-p sampling with temperature \\( t = 0.6 \\), topp=0.95 and sampling size \\( k = 5 \\). Only top-p sampling results are reported in the main text; greedy decoding results are provided in the Appendix C.1." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.735, + 0.707, + 0.749 + ], + "angle": 0, + "content": "4.2 Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.755, + 0.884, + 0.901 + ], + "angle": 0, + "content": "Format Metrics. 
To assess the formatting quality of LRM responses, we compute the proportion of responses that satisfy the following two formatting criteria (averaged over 5 runs for top-p sampling). S-Corr (Strict Format Correctness Rate): In general, an end thinking marker (e.g., \\( \\langle \\) /think \\( \\rangle \\)) is expected to separate the thinking process from the non-empty final answer. S-Corr measures the proportion of responses that satisfy this criterion." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.899, + 0.489, + 0.92 + ], + "angle": 0, + "content": "4We select four small LLMs: Qwen2.5-7B, Llama3.1-8B, Mistral8B, and Gemma2-9B. The full model IDs are detailed in Table B.1." + }, + { + "type": "page_footnote", + "bbox": [ + 0.529, + 0.908, + 0.782, + 0.921 + ], + "angle": 0, + "content": "Model details are presented in Appendix B.1." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.955 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.198, + 0.083, + 0.803, + 0.4 + ], + "angle": 0, + "content": "
Model IDSizeLoose FormatStrict FormatL-Corr ↑S-Corr ↑Tokens ↓
pass@1↑acc@k↑pass@1↑acc@k↑
Validator LLMs7-9B100.00100.00100.00100.00--42.00
Qwen3-A22B235B99.9199.7699.9199.76100.00100.00701.65
Qwen3-A3B30B99.9599.7699.9599.76100.00100.00638.40
QwQ-32B32B100.00100.00100.00100.00100.00100.00720.10
Qwen3-32B32B99.9199.5399.9199.5399.9199.91668.69
Qwen3-14B14B99.9599.7699.9599.7699.9599.95582.99
Qwen3-8B8B99.9599.7699.9599.7699.9599.95657.76
Qwen3-1.7B1.7B99.3497.3999.3497.3999.8199.81595.90
Hunyuan-T1-99.9199.5399.9199.53100.00100.00542.31
DS-R1671B100.00100.00100.00100.00100.00100.00646.40
DS-R1-70B70B99.4897.3999.3896.92100.0099.91453.81
DS-R1-32B32B99.7298.8299.7298.82100.00100.00429.91
DS-R1-14B14B99.5797.8799.5797.87100.00100.00475.46
DS-R1-8B8B97.4497.1697.3997.1699.7699.53452.11
DS-R1-7B7B95.2185.7895.2185.7899.2499.24454.55
DS-R1-1.5B1.5B81.4754.5081.4754.5097.5897.58489.54
Sky-T1-32B32B98.8294.7994.8879.6299.4895.26163.00
Nemotron-49B49B99.1597.3999.1597.39100.00100.00362.54
Nemotron-8B8B86.1669.9179.8159.0099.4384.31372.57
L-R1-32B32B97.8791.0094.7479.6298.9195.071095.36
L-R1-32B-DS32B99.5798.1099.5798.1099.8199.81524.12
L-R1-14B-DS14B99.0595.9799.0595.9799.1999.19693.19
L-R1-7B-DS7B94.6483.6594.6483.6599.7699.67496.47
s1.1-32B32B99.5398.3499.4898.1099.5799.53998.00
s1.1-14B14B97.6393.6097.2591.9497.7797.39839.86
s1.1-7B7B96.6888.3988.5863.9897.1188.96711.49
EXAONE-32B32B97.0694.0897.0694.0899.8199.81800.56
EXAONE-7.8B7.8B88.1575.1287.8274.4198.7298.061046.87
EXAONE-2.4B2.4B72.4256.1672.3256.1697.4497.251593.96
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.41, + 0.884, + 0.44 + ], + "angle": 0, + "content": "Table 3: Main results in the top-p sampling setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.464, + 0.489, + 0.546 + ], + "angle": 0, + "content": "L-Corr (Loose Format Correctness Rate): LRMs may occasionally generate responses with endless thinking. L-Corr quantifies the proportion of responses that do not exhibit this failure mode. Detailed format types are given in Appendix B.4." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.566, + 0.49, + 0.631 + ], + "angle": 0, + "content": "Efficiency Metrics. We calculate the average token counts for responses (Tokens) except for those generate endless thinking. Token counts are obtained using the Qwen2.5 tokenizer." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.652, + 0.49, + 0.877 + ], + "angle": 0, + "content": "Accuracy Metrics. We calculate accuracy metrics under both strict and loose formatting requirements, respectively. We use GPT-4o as the evaluator to assess the correctness of the responses6, with the evaluation prompt in Appendix B.2. For greedy sampling, we directly calculate the accuracy rate. For top-p sampling, we utilize two metrics: Pass@1 and Acc@k. Pass@1 follows DeepSeekR1 (Guo et al., 2025), and Acc@k is the percentage of questions with all k answers correct. The two metrics use k=5, and their detailed definitions can be found in Appendix B.3. Notably, S-Corr \\ L-Corr represents the upper bound for pass@1 and acc@5 under strict \\ loose formatting requirements." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.464, + 0.661, + 0.479 + ], + "angle": 0, + "content": "4.3 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.489, + 0.884, + 0.521 + ], + "angle": 0, + "content": "Table 3 and Figure 8 presents the main results of LRMs on S1-Bench, revealing two key phenomena." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.536, + 0.885, + 0.922 + ], + "angle": 0, + "content": "LRMs exhibit significantly lower efficiency than LLMs on S1-Bench, and no clear correlation is observed between ART and model size. We observed the following: First, state-of-the-art LRMs, such as DeepSeek-R1 and Qwen3, do not demonstrate a distinct advantage in efficiency. In contrast, Sky-T1-32B, which undergoes specific optimizations to mitigate overthinking using SimPO, achieves the highest efficiency. Second, The L-R1-DS 7B/14B/32B models are further post-trained from the DS-R1-7B/14B/32B models. The L-R1-DS models tend to produce longer responses, suggesting that while additional post-training may enhance the model's capability for complex reasoning, it comes at the cost of response efficiency. Finally, the s1.1 models generate considerably longer responses than the DeepSeek-R1-Distilled models. Despite both models being trained solely with SFT to acquire long-COT reasoning ability, the DeepSeek-R1-Distilled models use 800K training samples, while the s1.1 models are trained on only 1K. This discrepancy suggests that the smaller training set may lead to superficial imitation of long reasoning patterns, resulting in verbose thinking on" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.899, + 0.489, + 0.922 + ], + "angle": 0, + "content": "If a final answer can be isolated, only the final answer is evaluated; otherwise, the entire response is assessed." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.955 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.161, + 0.082, + 0.837, + 0.505 + ], + "angle": 0, + "content": "
Model IDSizeS1-Bench-ENS1-Bench-ZHAvg
RSNKNOIFANAAvgRSNKNOIFANAAvg
Gemma2-9B9B74.829.45.352.445.951.619.87.535.131.038.8
Llama3.1-8B8B91.035.412.461.956.044.028.315.218.726.742.0
Qwen2.5-7B7B65.546.36.449.646.550.546.69.836.938.842.8
Mistral-8B8B67.255.58.650.149.647.356.114.829.738.744.4
Column Avg-74.641.68.253.549.548.337.711.830.133.842.0
Sky-T1-32B32B215.8174.198.5233.3194.3125.5125.399.4145.5128.9163.0
Nemotron-49B49B599.7587.6396.5526.1540.4232.9157.3235.5107.8168.8362.5
Nemotron-8B8B561.0585.1458.0303.1462.6369.5326.0288.1166.7273.5372.6
DS-R1-32B32B421.8504.4414.7521.1473.7362.2385.6343.1408.8382.2429.9
DS-R1-8B8B472.2528.9530.7462.7491.2521.9404.4266.2395.5409.4452.1
DS-R1-70B70B464.1501.3378.5536.1484.0450.8450.2328.4416.7420.9453.8
DS-R1-7B7B447.5623.9353.8510.0495.5446.5463.2339.5373.0409.4454.5
DS-R1-14B14B503.7674.7367.3494.2519.0452.0465.4375.3405.8428.0475.5
DS-R1-1.5B1.5B480.8584.7417.4577.2529.1493.0497.4329.8423.1446.0489.5
L-R1-7B-DS7B568.1667.1501.7566.3580.3444.8454.6344.1366.4405.0496.5
L-R1-32B-DS32B574.5706.6647.6632.8636.3431.2367.0377.1418.7402.2524.1
Hunyuan-T1-561.6693.8380.9435.0521.2676.8553.8505.1523.8565.3542.3
Qwen3-14B14B700.4639.5286.2575.0579.8730.4557.2403.1586.0586.5583.0
Qwen3-1.7B1.7B790.4720.6399.9526.2624.6689.8563.6406.4545.9564.7595.9
Qwen3-A3B30B745.0729.3328.1594.8625.7773.7655.8453.7648.6652.2638.4
DS-R1671B786.1723.8711.4529.2672.5727.3638.5607.9533.9617.9646.4
Qwen3-8B8B853.7753.1394.4629.5683.2749.2623.8459.3624.0630.0657.8
Qwen3-32B32B805.7774.2356.9645.5674.7780.2695.2446.6645.3662.1668.7
L-R1-14B-DS14B951.01026.0829.8653.5848.2594.7610.1442.2451.7525.7693.2
Qwen3-A22B235B925.3864.3487.2605.7734.5803.3713.4487.2611.3665.9701.7
s1.1-7B7B1039.5840.81923.2529.4929.9489.6351.31034.3332.4475.6711.5
QwQ-32B32B873.3808.1520.8634.7722.4866.9707.3613.3667.7717.6720.1
EXAONE-32B32B1323.71057.61537.0711.61086.4703.2348.61302.9125.5490.3800.6
s1.1-14B14B871.8746.22233.1708.1960.2654.6546.01512.6579.7710.7839.9
s1.1-32B32B1077.9889.72055.4781.71081.7995.6765.21634.6666.5906.5998.0
EXAONE-7.8B7.8B1498.31398.91775.7882.41303.81410.3497.81633.1205.0767.01046.9
L-R1-32B32B1614.01217.31996.9930.11338.31035.6737.71240.7610.2835.31095.4
EXAONE-2.4B2.4B1927.31426.21200.1825.71320.72469.71622.62471.61511.21898.71594.0
Column Avg-809.1766.0785.1591.5718.4695.8545.9677.9482.0576.7650.3
Improvement-×10.8×18.4×96.0×11.1×14.5×14.4×14.5×57.3×16.0×17.1×15.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.516, + 0.884, + 0.559 + ], + "angle": 0, + "content": "Table 4: Average response tokens in the top-p sampling setting on the S1-bench across two languages and four main categories. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst. Bold represents the maximum Improvement value for each language." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.585, + 0.248, + 0.6 + ], + "angle": 0, + "content": "simple questions." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.613, + 0.49, + 0.806 + ], + "angle": 0, + "content": "Several LRMs exhibit under-accuracy and limited robustness on simple questions. First, our observations find that, despite employing deep reasoning, most LRMs tend to exhibit lower accuracy on simple questions compared to traditional LLMs. For example, DS-R1-1.5B and EXAONE-2.4B achieve just above \\(50\\%\\) acc@k. Second, many LRMs struggle with robust correctness in top-p sampling, where acc@k is significantly lower than pass@1. This issue is particularly pronounced in smaller LRMs. For instance, DS-R1-1.5B achieved \\(81.47\\%\\) pass@1 but only \\(54.50\\%\\) acc@k." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.822, + 0.315, + 0.837 + ], + "angle": 0, + "content": "5 Efficiency Analysis" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.851, + 0.406, + 0.867 + ], + "angle": 0, + "content": "5.1 Analysis across Question Types" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.49, + 0.922 + ], + "angle": 0, + "content": "To better understand the efficiency differences of LRRMs across question types, we analyze the average response tokens across 4 main categories, 28" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.584, + 0.882, + 0.616 + ], + "angle": 0, + "content": "subcategories, and two languages. The results are displayed in Table 4 and Appendix C.2." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.624, + 0.884, + 0.817 + ], + "angle": 0, + "content": "LRMs exhibit a substantial increase in response length across all four major categories, 28 subcategories, and two languages. As shown in Table 4, for each of the four major categories, the average response length of LRMs exceeds that of LLMs by more than a factor of ten. Response lengths also increase significantly across all subcategories (see Appendix C.2). This suggests that while LRMs are primarily trained on reasoning data to produce long CoT style responses, this stylistic pattern generalizes well across a wide range of question types." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.818, + 0.882, + 0.849 + ], + "angle": 0, + "content": "Moreover, 23 out of the 28 LRMs produce longer responses to questions in English than Chinese." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.858, + 0.882, + 0.921 + ], + "angle": 0, + "content": "LRMs exhibit the most significant increase in ART for instruction following questions and tend to over-exlore when the solution space is vast. As shown in Table 4, although small LLMs" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.943, + 0.506, + 0.955 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.083, + 0.482, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.293, + 0.49, + 0.337 + ], + "angle": 0, + "content": "Figure 3: (a) Comparison of first round and additional token costs for each LRM. (b) Distribution of solution rounds for each LRM." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.367, + 0.49, + 0.655 + ], + "angle": 0, + "content": "provide the most concise responses to instruction following questions, LRMs generate dramatically longer outputs—96.0 times longer in English and 57.3 times longer in Chinese than small LLMs. 
To investigate the cause, we further analyze the subcategories of instruction following questions. As shown in Appendix C.2, average tokens is notably longer in the subcategories of length constraints, character constraints, and sentence constraints. These three question types share a similar characteristic: their correctness is easy to verify, but the solution space is vast. We find that, although the model quickly identifies a correct answer, it becomes trapped in the search space, continually exploring alternatives and failing to stop in time. A case can be seen in Table 21. This phenomenon is more pronounced in families with lower efficiency, such as s1.1 and EXAONE." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.676, + 0.377, + 0.692 + ], + "angle": 0, + "content": "5.2 Thinking Solution Analysis" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.702, + 0.49, + 0.862 + ], + "angle": 0, + "content": "To better understand the causes of inefficiency in LRMs on S1-Bench, we analyze the solution rounds of their thinking processes7. We first use DeepSeek-v3 to segment each thinking process into several solutions, each defined as a point at which LRMs explicitly arrives at a conclusion that matches the correct answer. We then compute the average token counts in the first solution. The detailed experimental setup is provided in Appendix C.3. Our analysis reveals the following:" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.085, + 0.877, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.231, + 0.885, + 0.302 + ], + "angle": 0, + "content": "Figure 4: Distribution of the thinking process across four categories. FA and TP refer to Final Answer and Thinking Process, respectively. Green bars indicate cases where the final answer is correct, while red bars indicate cases where it is incorrect." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.331, + 0.885, + 0.428 + ], + "angle": 0, + "content": "The token consumed in the first solution of LRMs significantly exceeds that of validator LLMs, as shown in Figure 3 (a). This suggests that LRMs may involve unnecessary reasoning steps in each solution, which could be one of the reasons for their inefficiency." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.441, + 0.885, + 0.683 + ], + "angle": 0, + "content": "The primary reason for efficiency gaps between LRMs lies in the number of redundant solution rounds they generate, rather than the token cost in the initial round. As shown in Figure 3 (a), although total thinking token counts vary widely across LRMs, their token counts in the initial round are similar and only account for a small fraction of the total. Figure 3 (b) further shows the distribution of solution rounds on S1-Bench, revealing that LRMs with longer thinking processes tend to generate more solution round, and this redundancy greatly increases computational cost. Furthermore, further experiments reveal that the redundancy in the reasoning process gradually increases over time. Appendix C.4 presents the experimental details." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.7, + 0.673, + 0.716 + ], + "angle": 0, + "content": "6 Error Analysis" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.729, + 0.885, + 0.922 + ], + "angle": 0, + "content": "This section analyzes the errors made in the thinking process. Specifically, we utilize DeepSeekv3 to categorize the responses of LRMs into four cases and compute the corresponding proportions: (1) Final answer correct; thinking process entirely accurate. (2) Final answer correct; thinking process contains intermediate errors. (3) Final answer incorrect; correct answer mentioned in thinking process. (4) Final answer incorrect; correct answer never mentioned in thinking process. 
The classification details are in Appendix C.5; results are shown in Figure 4. Key findings include:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.879, + 0.489, + 0.922 + ], + "angle": 0, + "content": "7We only analyze well-formatted thinking processes with correct final answers, as incorrect answers make it unclear whether LRMs are over-reasoning or under-reasoning, and malformed thinking processes cannot be precisely extracted." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.942, + 0.505, + 0.954 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.13, + 0.085, + 0.475, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.295, + 0.49, + 0.325 + ], + "angle": 0, + "content": "Figure 5: Top: Count of \"gut moments\" across models. Bottom: Probability of \"gut moments\" by question type." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.348, + 0.49, + 0.492 + ], + "angle": 0, + "content": "Lower-accuracy LRMs tend to produce less reliable reasoning chains; even when they arrive at the correct final answer, their intermediate steps often contain errors (light green). LRMs with high accuracy (e.g., DS-R1) show almost no flawed reasoning steps, whereas those with lower accuracy (e.g., DS-R1-1.5B) often generate incorrect intermediate conclusions, further indicating that they lack robust reasoning ability." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.501, + 0.49, + 0.63 + ], + "angle": 0, + "content": "Although LRMs sometimes mention the correct answer during reasoning, they may deviate and ultimately produce incorrect final answers (light red). In one case, the LRM initially arrived at the correct answer but undermined it through excessive verification, a case can be seen in Table 24. In another case, the LRM directly denies the correct answer, a case can be seen in Table 23." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.641, + 0.263, + 0.656 + ], + "angle": 0, + "content": "7 Gut Moment" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.666, + 0.49, + 0.856 + ], + "angle": 0, + "content": "We observe an intriguing phenomenon on S1-Bench: LRMs sometimes show an early sense of question difficulty before solving, which we call the \"gut moment.\" To explore this phenomenon, we prompt GPT-4o to classify the initial part of model responses (before the first \"\\n\\ntypes based on its comment on difficulty: easy, neutral, difficult, and no comment. Figure 5 presents these classifications and their probabilities across four question types. Experimental details and cases are in Appendix C.6. This leads to the following observations:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.859, + 0.49, + 0.891 + ], + "angle": 0, + "content": "First, all LRMs show the \"gut moment\" phenomenon to varying degrees, which is more evident" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.086, + 0.871, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.216, + 0.883, + 0.245 + ], + "angle": 0, + "content": "Figure 6: Average response tokens in the easy category vs. all samples. Dots show difference: easy minus all." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.278, + 0.885, + 0.488 + ], + "angle": 0, + "content": "in the Qwen, DeepSeek, and Light-R1 families and Hunyuan-T1. Second, LRMs show stylistic differences in expressing \"gut moment.\" For example, the Qwen family often views questions as simple, whereas the DeepSeek-distilled models show more diverse difficulty comments. Third, some LRMs show significantly stronger \"gut moment\" in Chinese than in English, such as the Qwen and DeepSeek families, likely due to a higher proportion of Chinese in their training data. 
Finally, the \"gut moment\" is most evident in reasoning questions and rarely appears in analytical questions, except in DeepSeek-distilled models." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.492, + 0.884, + 0.701 + ], + "angle": 0, + "content": "To investigate whether the early sense of a question as \"easy\" leads to a corresponding reduction in response length, we compare the average response tokens for questions in the easy category versus all samples. The results are shown in Figure 6. Except for L-R1-32B, other LRMs do not exhibit a noticeable decrease in response length when questions are viewed as \"easy\"; in fact, 21 out of 28 LRMs showed an increase in response length under this condition. This suggests a discrepancy between the LRM's initial sense of difficulty and its generative behavior, the causes and improvements of which warrant further investigation." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.726, + 0.642, + 0.74 + ], + "angle": 0, + "content": "8 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.761, + 0.885, + 0.922 + ], + "angle": 0, + "content": "This paper introduces S1-Bench, the first benchmark designed to evaluate system 1 thinking capabilities in LRMs. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe \"gut moment\" and find a gap between their difficulty perception and generation length. Overall, this work paves the way toward dual-system compatibility in the development of LRMs." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.899, + 0.488, + 0.921 + ], + "angle": 0, + "content": "8Derived from \"gut feeling,\" meaning intuition-based judgment without analysis." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.955 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.221, + 0.1 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.112, + 0.493, + 0.368 + ], + "angle": 0, + "content": "Although S1-Bench pioneers the evaluation of system 1 thinking in LRMs, it still has several limitations. First, due to our emphasis on ensuring the uniqueness of each sample during dataset construction—for instance, including only one question for basic arithmetic operations such as addition, subtraction, and multiplication—the overall scale of the benchmark remains limited. As a next step, we plan to expand the scale of S1-Bench. Second, while recent months have seen a surge in newly released open-source LRMs, we have only evaluated 28 representative models and have not covered the full spectrum of available models. Lastly, we do not propose methods to improve the efficiency of LRMs on system 1 tasks in this work; this will be the focus of our future research." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.398, + 0.214, + 0.414 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.422, + 0.49, + 0.476 + ], + "angle": 0, + "content": "Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.488, + 0.295, + 0.501 + ], + "angle": 0, + "content": "AI-MO. 2024. Amc 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.514, + 0.489, + 0.554 + ], + "angle": 0, + "content": "Daman Arora and Andrea Zanette. 2025. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.567, + 0.488, + 0.62 + ], + "angle": 0, + "content": "Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. 2025. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.633, + 0.49, + 0.737 + ], + "angle": 0, + "content": "Akhiad Bercovich, Itay Levy, Izik Golan, Mohammad Dabbah, Ran El-Yaniv, Omri Puny, Ido Galil, Zach Moshe, Tomer Ronen, Najeeb Nabwani, Ido Shahaf, Oren Tropp, Ehud Karpas, Ran Zilberstein, Jiaqi Zeng, Soumye Singhal, Alexander Bukharin, Yian Zhang, Tugrul Konuk, and 113 others. 2025. Llamameton: Efficient reasoning models. Preprint, arXiv:2505.00949." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.75, + 0.49, + 0.83 + ], + "angle": 0, + "content": "Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, and 1 others. 2024. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.842, + 0.49, + 0.92 + ], + "angle": 0, + "content": "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 2025a. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.422, + 0.49, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.883, + 0.14 + ], + "angle": 0, + "content": "Xiaoshu Chen, Sihang Zhou, Ke Liang, and Xinwang Liu. 2024a. Distilling reasoning ability from large language models with adaptive thinking. arXiv preprint arXiv:2404.09170." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.148, + 0.885, + 0.226 + ], + "angle": 0, + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Quzhi Liu, Mengfei Zhou, Zhuosheng Zhang, and 1 others. 2024b. Do not think that much for \\(2 + 3 = ?\\) on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.235, + 0.884, + 0.3 + ], + "angle": 0, + "content": "Yilong Chen, Junyuan Shang, Zhengyu Zhang, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2024c. Mixture of hidden-dimensions transformer. arXiv preprint arXiv:2412.05644." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.309, + 0.884, + 0.388 + ], + "angle": 0, + "content": "Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2025b. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.397, + 0.885, + 0.488 + ], + "angle": 0, + "content": "Cheng-Han Chiang and Hung-yi Lee. 2024. Overreasoning and redundant calculation of large language models. In Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers), pages 161-169, St. Julian's, Malta. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.497, + 0.884, + 0.575 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, and 1 others. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.584, + 0.884, + 0.663 + ], + "angle": 0, + "content": "Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, and 1 others. 2025. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.671, + 0.885, + 0.737 + ], + "angle": 0, + "content": "Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, and 1 others. 2025. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.746, + 0.884, + 0.786 + ], + "angle": 0, + "content": "Xiachong Feng, Longxu Dou, and Lingpeng Kong. 2025. Reasoning does not necessarily improve roleplaying ability. arXiv preprint arXiv:2502.16940." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.794, + 0.885, + 0.846 + ], + "angle": 0, + "content": "Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. 2024. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.855, + 0.884, + 0.92 + ], + "angle": 0, + "content": "Yichao Fu, Junda Chen, Yonghao Zhuang, Zheyu Fu, Ion Stoica, and Hao Zhang. 2025. Reasoning without self-doubt: More efficient chain-of-thought through certainty probing. In ICLR 2025 Workshop on Foundation Models in the Wild." 
+ }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.885, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.954 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.164 + ], + "angle": 0, + "content": "Isabel O Gallegos, Ryan A Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K Ahmed. 2024. Bias and fairness in large language models: A survey. Computational Linguistics, 50(3):1097-1179." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.178, + 0.488, + 0.23 + ], + "angle": 0, + "content": "Tyler Griggs, Shiyi Cao, Dacheng Li, Shu Liu, Shishir G. Patil, Matei Zaharia, Joey Gonzalez, and Ion Stoica. 2025. Think less, achieve more: Cut reasoning costs by \\(50\\%\\) without sacrificing accuracy." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.243, + 0.488, + 0.32 + ], + "angle": 0, + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.334, + 0.488, + 0.386 + ], + "angle": 0, + "content": "Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. 2024. Token-budget-aware llm reasoning. arXiv preprint arXiv:2412.18547." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.399, + 0.488, + 0.476 + ], + "angle": 0, + "content": "Masoud Hashemi, Oluwanifemi Bamgp Bose, Sathwik Tejaswi Madhusudhan, Jishnu Sethumadhavan Nair, Aman Tiwari, and Vikas Yadav. 2025. Dna bench: When silence is smarter-benchmarking over-reasoning in reasoning llms. arXiv preprint arXiv:2503.15793." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.49, + 0.488, + 0.581 + ], + "angle": 0, + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, and 1 others. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.595, + 0.488, + 0.66 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2021a. Measuring massive multitask language understanding. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.673, + 0.488, + 0.738 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.751, + 0.488, + 0.815 + ], + "angle": 0, + "content": "Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. 2025. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.828, + 0.488, + 0.919 + ], + "angle": 0, + "content": "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2025. Livecodebench: Holistic and contamination free evaluation of large language models for code. In The Thirteenth International Conference on Learning Representations." 
+ }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.919 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.152 + ], + "angle": 0, + "content": "Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. 2025. Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.161, + 0.883, + 0.225 + ], + "angle": 0, + "content": "Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. 2025a. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, pages arXiv-2502." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.235, + 0.883, + 0.314 + ], + "angle": 0, + "content": "Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025b. Llm post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.323, + 0.883, + 0.375 + ], + "angle": 0, + "content": "Ayeong Lee, Ethan Che, and Tianyi Peng. 2025. How well do llms compress their own chain-of-thought? a token complexity approach. arXiv preprint arXiv:2503.01141." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.383, + 0.883, + 0.462 + ], + "angle": 0, + "content": "Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Eric Tang, Sumanth Hegde, Kourosh Hakhamaneshi, Shishir G Patil, Matei Zaharia, and 1 others. 2025a. Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.471, + 0.883, + 0.535 + ], + "angle": 0, + "content": "Yiwei Li, Peiwen Yuan, Shaoxiong Feng, Boyuan Pan, Xinglin Wang, Bin Sun, Heda Wang, and Kan Li. 2024. Escape sky-high cost: Early-stopping self-consistency for multi-step reasoning. arXiv preprint arXiv:2401.10480." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.545, + 0.883, + 0.623 + ], + "angle": 0, + "content": "Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Ji-axin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, and 1 others. 2025b. From system 1 to system 2: A survey of reasoning large language models. arXiv preprint arXiv:2502.17419." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.633, + 0.883, + 0.697 + ], + "angle": 0, + "content": "Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. 2025. Reward-guided speculative decoding for efficient ltm reasoning. arXiv preprint arXiv:2501.19324." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.706, + 0.883, + 0.785 + ], + "angle": 0, + "content": "Ximing Lu, Seungju Han, David Acuna, Hyunwoo Kim, Jaehun Jung, Shrimai Prabhumoye, Niklas Muennighoff, Mostofa Patwary, Mohammad Shoeybi, Bryan Catanzaro, and 1 others. 2025. Retro-search: Exploring untaken paths for deeper and efficient reasoning. arXiv preprint arXiv:2504.04383." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.794, + 0.883, + 0.86 + ], + "angle": 0, + "content": "Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. 2025. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.868, + 0.883, + 0.921 + ], + "angle": 0, + "content": "Wenjie Ma, Jingxuan He, Charlie Snell, Tyler Griggs, Sewon Min, and Matei Zaharia. 2025a. 
Reasoning models can be effective without thinking. arXiv preprint arXiv:2504.09858." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.511, + 0.955 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.138 + ], + "angle": 0, + "content": "Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. 2025b. Cot-valve: Length-compressible chain-of-thought tuning. arXiv preprint arXiv:2502.09601." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.149, + 0.486, + 0.188 + ], + "angle": 0, + "content": "MAA Committees. Aime problems and solutions. https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.199, + 0.487, + 0.25 + ], + "angle": 0, + "content": "Shen-Yun Miao, Chao-Chun Liang, and Keh-Yih Su. 2021. A diverse corpus for evaluating and developing english math word problem solvers. arXiv preprint arXiv:2106.15772." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.261, + 0.487, + 0.34 + ], + "angle": 0, + "content": "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, and 1 others. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.35, + 0.487, + 0.416 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.426, + 0.487, + 0.479 + ], + "angle": 0, + "content": "Tergel Munkhbat, Namgyu Ho, Seo Hyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. 2025. Self-training elicits concise reasoning in large language models. URL https://arxiv.org/abs/2502.20122." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.489, + 0.487, + 0.527 + ], + "angle": 0, + "content": "OpenAI. 2024. Learning to reason with LLMs. https://openai.com/index/learning-to-reason-with-11ms/." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.538, + 0.487, + 0.591 + ], + "angle": 0, + "content": "Rui Pan, Yinwei Dai, Zhihao Zhang, Gabriele Oliaro, Zhihao Jia, and Ravi Netravali. 2025. Specreason: Fast and accurate inference-time compute via speculative reasoning. arXiv preprint arXiv:2504.07891." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.602, + 0.487, + 0.681 + ], + "angle": 0, + "content": "Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. 2021. Are nlp models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2080-2094." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.69, + 0.487, + 0.768 + ], + "angle": 0, + "content": "Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, and 1 others. 2025a. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.779, + 0.487, + 0.845 + ], + "angle": 0, + "content": "Yuxiao Qu, Matthew YR Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. 2025b. Optimizing test-time compute via meta reinforcement finetuning. arXiv preprint arXiv:2503.07572." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.855, + 0.487, + 0.92 + ], + "angle": 0, + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2024. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.164 + ], + "angle": 0, + "content": "LG Research, Kyunghoon Bae, Eunbi Choi, Kibong Choi, Stanley Jungkyu Choi, Yemuk Choi, Seokhee Hong, Junwon Hwang, Hyojin Jeon, Kijeong Jeon, and 1 others. 2025. Exaone deep: Reasoning enhanced language models. arXiv preprint arXiv:2503.12524." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.173, + 0.882, + 0.227 + ], + "angle": 0, + "content": "Jianshu She, Zhuohao Li, Zhemin Huang, Qi Li, Peiran Xu, Haonan Li, and Qirong Ho. 2025. Hawkeye: Efficient reasoning with model collaboration. arXiv preprint arXiv:2504.00424." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.235, + 0.882, + 0.3 + ], + "angle": 0, + "content": "Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. 2025a. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.31, + 0.882, + 0.362 + ], + "angle": 0, + "content": "Zhenyi Shen, Hanqi Yan, Linhai Zhang, Zhanghao Hu, Yali Du, and Yulan He. 2025b. Codi: Compressing chain-of-thought into continuous space via self-distillation. arXiv preprint arXiv:2502.21074." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.371, + 0.882, + 0.45 + ], + "angle": 0, + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and 1 others. 
2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.459, + 0.882, + 0.563 + ], + "angle": 0, + "content": "Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. 2019. Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4149-4158." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.573, + 0.882, + 0.626 + ], + "angle": 0, + "content": "Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. 2025. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.634, + 0.882, + 0.687 + ], + "angle": 0, + "content": "Kimi Team, A Du, B Gao, B Xing, C Jiang, C Chen, C Li, C Xiao, C Du, C Liao, and 1 others. 2025a. Kimi k1. 5: Scaling reinforcement learning with llms. URL https://arxiv.org/abs/2501.12599." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.696, + 0.882, + 0.762 + ], + "angle": 0, + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025b. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.771, + 0.882, + 0.81 + ], + "angle": 0, + "content": "NovaSky Team. 2025a. Sky-t1: Train your own o1 preview model within $450. https://novasky-ai.github.io/posts/sky-t1." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.819, + 0.882, + 0.859 + ], + "angle": 0, + "content": "Qwen Team. 2025b. QwQ-32b: Embracing the power of reinforcement learning. https://qwenlm.github. b.io/blog/qwq-32b/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.868, + 0.882, + 0.92 + ], + "angle": 0, + "content": "Tencent. 2025. Reasoning efficiency redefined! meet Tencent's 'hunyuan-t1'—the first mamba-powered ultra-large model. https://llm.hunyuan.tencent.com/#/Blog/hy-t1/." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.508, + 0.954 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.138 + ], + "angle": 0, + "content": "Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. 2024. Dynamic self-consistency: Leveraging reasoning paths for efficient llm sampling. arXiv preprint arXiv:2408.17017." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.152, + 0.487, + 0.23 + ], + "angle": 0, + "content": "Junlin Wang, Shang Zhu, Jon Saad-Falcon, Ben Athiwaratkun, Qingyang Wu, Jue Wang, Shuaiwen Leon Song, Ce Zhang, Bhuwan Dhingra, and James Zou. 2025a. Think deep, think fast: Investigating efficiency of verifier-free inference-time-scaling methods. arXiv preprint arXiv:2504.14047." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.243, + 0.487, + 0.308 + ], + "angle": 0, + "content": "Xinglin Wang, Shaoxiong Feng, Yiwei Li, Peiwen Yuan, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, and Kan Li. 2024. Make every penny count: Difficulty-adaptive self-consistency for cost-efficient reasoning. arXiv preprint arXiv:2408.13457." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.322, + 0.487, + 0.386 + ], + "angle": 0, + "content": "Yiming Wang, Pei Zhang, Siyuan Huang, Baosong Yang, Zhuosheng Zhang, Fei Huang, and Rui Wang. 2025b. Sampling-efficient test-time scaling: Self-estimating the best-of-n sampling in early decoding. arXiv preprint arXiv:2503.01422." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.4, + 0.487, + 0.476 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.491, + 0.487, + 0.555 + ], + "angle": 0, + "content": "Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. Light-R1: Curriculum sft, dpo and rl for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.569, + 0.487, + 0.62 + ], + "angle": 0, + "content": "Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. 2025. Tokenskip: Controllable chain-of-thought compression in llms. arXiv preprint arXiv:2502.12067." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.634, + 0.487, + 0.698 + ], + "angle": 0, + "content": "Jingxian Xu, Mengyu Zhou, Weichang Liu, Hanbing Liu, Shi Han, and Dongmei Zhang. 2025a. Twt: Thinking without tokens by habitual reasoning distillation with multi-teachers' guidance. arXiv preprint arXiv:2503.24198." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.712, + 0.487, + 0.763 + ], + "angle": 0, + "content": "Yuhui Xu, Hanze Dong, Lei Wang, Doyen Sahoo, Junnan Li, and Caiming Xiong. 2025b. Scalable chain of thoughts via elastic reasoning. arXiv preprint arXiv:2505.05315." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.778, + 0.487, + 0.841 + ], + "angle": 0, + "content": "Yuchen Yan, Yongliang Shen, Yang Liu, Jin Jiang, Mengdi Zhang, Jian Shao, and Yueting Zhuang. 2025. Infty think: Breaking the length limits of long-context reasoning in large language models. arXiv preprint arXiv:2503.06692." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.856, + 0.487, + 0.919 + ], + "angle": 0, + "content": "An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, and 1 others. 2025a. Qwen3 technical report. arXiv preprint arXiv:2505.09388." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.919 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.139 + ], + "angle": 0, + "content": "Chenxu Yang, Qingyi Si, Yongjie Duan, Zheliang Zhu, Chenyu Zhu, Zheng Lin, Li Cao, and Weiping Wang. 2025b. Dynamic early exit in reasoning models. arXiv preprint arXiv:2504.15895." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.149, + 0.882, + 0.188 + ], + "angle": 0, + "content": "Junjie Yang, Ke Lin, and Xing Yu. 2025c. Think when you need: Self-adaptive chain-of-thought learning. arXiv preprint arXiv:2504.03234." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.199, + 0.882, + 0.251 + ], + "angle": 0, + "content": "Wang Yang, Xiang Yue, Vipin Chaudhary, and Xiaotian Han. 2025d. Speculative thinking: Enhancing small-model reasoning with large model guidance at inference time. arXiv preprint arXiv:2504.12329." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.262, + 0.882, + 0.313 + ], + "angle": 0, + "content": "Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. 2025e. Towards thinking-optimal scaling of test-time compute for llm reasoning. arXiv preprint arXiv:2502.18080." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.325, + 0.882, + 0.364 + ], + "angle": 0, + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. LIMO: Less is more for reasoning. arXiv preprint arXiv:2502.03387." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.374, + 0.882, + 0.425 + ], + "angle": 0, + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. 
Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.437, + 0.882, + 0.502 + ], + "angle": 0, + "content": "Bin Yu, Hang Yuan, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. 2025a. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. arXiv preprint arXiv:2505.03469." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.513, + 0.882, + 0.552 + ], + "angle": 0, + "content": "Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. 2025b. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.563, + 0.882, + 0.626 + ], + "angle": 0, + "content": "Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. 2025a. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.638, + 0.882, + 0.703 + ], + "angle": 0, + "content": "Wenyuan Zhang, Tianyun Liu, Mengxiao Song, Xiaodong Li, and Tingwen Liu. 2025b. SOTOPIAΩ: Dynamic strategy injection learning and social instruction following evaluation for social agents. Preprint, arXiv:2502.15538." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.713, + 0.882, + 0.805 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. Judging LLM-as-a-judge with MT-bench and chatbot arena. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.815, + 0.882, + 0.88 + ], + "angle": 0, + "content": "Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. 2025. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.084, + 0.419, + 0.116 + ], + "angle": 0, + "content": "A More Information of S1-Bench Construction" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.127, + 0.333, + 0.141 + ], + "angle": 0, + "content": "A.1 Benchmark Statistics" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.149, + 0.49, + 0.405 + ], + "angle": 0, + "content": "We survey studies on improving the efficiency of LRMs, as there is potential overlap between these studies and the technical approaches aimed at enhancing system 1 thinking in LRMs. Table 7 presents the results of our survey. We compile the benchmarks used in these studies for evaluation, that are typically used to verify whether models achieve efficiency improvements. Benchmarks that appear more than four times include: MATH500 (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), AIME24/25 (MAA Committees), GPQA (Rein et al., 2024), AMC23 (AI-MO, 2024), MMLU (Hendrycks et al., 2021a), Olympiad-Bench (He et al., 2024), SVAMP (Patel et al., 2021), LiveCodeBench (Jain et al., 2025), and CommonSenseQA (Talmor et al., 2019)." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.407, + 0.49, + 0.471 + ], + "angle": 0, + "content": "The accuracy shown in Table 1 is the average result of the four models, Qwen2.5-7B, Llama3.1-8B, Mistral-8B, and Gemma2-9B, at temperature 0, using GPT-4o as the evaluator." 
+ }, + { + "type": "title", + "bbox": [ + 0.115, + 0.483, + 0.373, + 0.499 + ], + "angle": 0, + "content": "A.2 Subcategories in S1-Bench" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.505, + 0.49, + 0.552 + ], + "angle": 0, + "content": "Figure 7 shows the pie chart distribution of 28 subcategories in S1-Bench. For more details on the subcategories, please refer to Table 8,9." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.565, + 0.434, + 0.58 + ], + "angle": 0, + "content": "A.3 Prompt for S1-Bench construction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.587, + 0.49, + 0.664 + ], + "angle": 0, + "content": "This section presents the prompts used in the construction of S1-Bench, including the Initial Generation prompt, the Discriminating Generation Quality prompt, and the Reduce Difficulty prompt. See Table 10 for details." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.68, + 0.487, + 0.696 + ], + "angle": 0, + "content": "B Baseline Models and Evaluation Details" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.706, + 0.343, + 0.721 + ], + "angle": 0, + "content": "B.1 Baseline Model Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.727, + 0.49, + 0.807 + ], + "angle": 0, + "content": "Table 11 presents the abbreviations, IDs, and URLs of LLMs used in this paper. Table 12 displays the abbreviations, IDs, URLs, organizations, training algorithms, and training data volumes of open-source LRMs evaluated in this study." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.82, + 0.414, + 0.835 + ], + "angle": 0, + "content": "B.2 GPT-4o and Human Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.842, + 0.49, + 0.922 + ], + "angle": 0, + "content": "We use GPT-4o as the evaluator to assess the correctness of the responses. If a final answer can be isolated, only the final answer is evaluated; otherwise, the entire response is assessed. The evaluation prompt is provided in Table 13." 
+ }, + { + "type": "image", + "bbox": [ + 0.547, + 0.085, + 0.848, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.312, + 0.883, + 0.356 + ], + "angle": 0, + "content": "Figure 7: S1-Bench Category Display. The inner circle represents four major categories, and the outer circle includes 28 subcategories." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.38, + 0.885, + 0.637 + ], + "angle": 0, + "content": "To evaluate the consistency between the GPT-4 judge's assessments and human judgments, we conduct a comprehensive human evaluation study involving three of the authors. Specifically, we randomly sample 20 question-answer pairs from each model's greedy decoding results, resulting in a dataset of 640 pairs derived from 32 models (including 4 verifier LLMs and 28 LRMs). The questions, reference answers, and model responses are then presented to three annotators, who independently judge the correctness of each model response. The final human evaluation results are determined through majority voting. Ultimately, the Cohen's Kappa between the human evaluators and the GPT-4 judge is calculated to be 0.83, indicating an exceptionally high level of agreement." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.648, + 0.757, + 0.663 + ], + "angle": 0, + "content": "B.3 Accuracy Metrics Details" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.669, + 0.884, + 0.733 + ], + "angle": 0, + "content": "Pass@1: Followed DeepSeek-R1 (Guo et al., 2025), we calculate pass@1 to assess the percentage of correct responses among the \\( k = 5 \\) generations. 
Specifically, it is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.743, + 0.883, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\text {p a s s} @ 1 = \\frac {1}{k} \\sum_ {i = 1} ^ {k} p _ {i}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.795, + 0.883, + 0.873 + ], + "angle": 0, + "content": "where \\( p_i \\) is the correctness of the i-th generation. Acc@k: Since S1-Bench is composed of extremely simple questions, we calculate acc@k. Specifically, acc@k=1 if all k responses are correct and acc@k = 0 otherwise. It is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.632, + 0.882, + 0.883, + 0.924 + ], + "angle": 0, + "content": "\\[\n\\operatorname {a c c} @ \\mathrm {k} = \\prod_ {i = 1} ^ {k} p _ {i}, \\tag {2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.509, + 0.955 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.082, + 0.885, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.383, + 0.884, + 0.427 + ], + "angle": 0, + "content": "Figure 8: LRMs exhibit under-accuracy and overthinking on simple problems. Shapes represent organizations, colors represent base model families, with darker colors indicating larger models, and connecting lines represent the relationships between model families and training." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.452, + 0.456, + 0.468 + ], + "angle": 0, + "content": "B.4 Types and Analysis of Format Errors" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.472, + 0.49, + 0.648 + ], + "angle": 0, + "content": "This section introduces a comprehensive taxonomy of format errors and highlights the importance of addressing these issues in future research. Unlike conventional LLMs, LRMs frequently exhibit format errors. 
These errors are defined by failing to use a unique end thinking marker (e.g.,) to separate the thinking process from the final answer. Format errors increase the difficulty of distinguishing the thinking process from the final answer and reveal the vulnerability of LRMs in following predefined formats." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.649, + 0.49, + 0.73 + ], + "angle": 0, + "content": "To illustrate this phenomenon, we identify 12 distinct types of response formats produced by LRMs, each assigned a unique ID, as shown in Table 5. These 12 types are further grouped into three major categories:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.736, + 0.49, + 0.865 + ], + "angle": 0, + "content": "- Standard-Conforming Responses: These responses meet the expected format by including exactly one end thinking marker (e.g., ) to delimit the thinking process from the final answer. Among these, type ID-100 includes a thinking process, while ID-101 omits it. The proportion of such responses is measured using the S-Corr metric." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.874, + 0.49, + 0.922 + ], + "angle": 0, + "content": "- Unreadable Responses: These refer to generation failures, including cases where LRMs produce endlessly thinking content or solely" + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.736, + 0.49, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.452, + 0.885, + 0.5 + ], + "angle": 0, + "content": "produce end thinking markers. The proportion of all other (i.e., readable) responses is measured using the L-Corr metric." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.512, + 0.885, + 0.704 + ], + "angle": 0, + "content": "- Readable but Malformed Responses: These responses deviate from the standard format yet still contain extractable information. 
In some cases, the final answer is missing (e.g., ID-200, ID-202, ID-205), and we instead evaluate the correctness of the thinking process. In other cases, multiple (e.g., ID-201, ID-203) or unmatched9 (e.g., ID-204, ID-206) end thinking markers are generated. In such instances, we treat the content following the last end thinking marker as the final answer for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.717, + 0.885, + 0.879 + ], + "angle": 0, + "content": "Table 14 and Table 15 present the distributions of 12 format types under top-p sampling and greedy sampling, respectively. we find: (1) The infinite generation phenomenon is widespread across most LRMs, particularly concentrated in LRMs with fewer than 32B parameters. (2) The Nemotron and EXAONE families frequently produce correctly formatted responses without any explicit thinking processes. This behavior can be viewed as a mechanism for mitigating over-thinking. However," + }, + { + "type": "page_footnote", + "bbox": [ + 0.508, + 0.888, + 0.884, + 0.92 + ], + "angle": 0, + "content": "9This paper provides a reference collection of unmatched end thinking makers: \\(< /\\) think>, \\(< /\\) th think>, \\(< /\\) reason>, \\nanswer\\n, \\*\\*Final Answer\\*\\* and \\*\\*答案\\*." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.511, + 0.955 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.181 + ], + "angle": 0, + "content": "the EXAONE family still exhibits substantial overthinking tendencies, suggesting that LRMs' capability to respond without visible reasoning and their tendency to overthink may be orthogonal characteristics. (3) None of the evaluated LRMs exhibited behaviors classified as ID-205/206." + }, + { + "type": "table", + "bbox": [ + 0.116, + 0.19, + 0.49, + 0.368 + ], + "angle": 0, + "content": "
FormatIDmarker (standard)marker (unmatched)marker (number)thinking processfinal answer
Standard100-1
101-1×
Readable but Malformed200-1×
201->1
202->1×
203->1×
204×≥1
205×≥1×
206×≥1×
207××0-
Unreadable300≥1××
301××0-
" + }, + { + "type": "table_caption", + "bbox": [ + 0.157, + 0.377, + 0.443, + 0.392 + ], + "angle": 0, + "content": "Table 5: Twelve types of response format." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.424, + 0.475, + 0.44 + ], + "angle": 0, + "content": "C More Experimental Setups & Results" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.45, + 0.364, + 0.466 + ], + "angle": 0, + "content": "C.1 Greedy Sampling Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.47, + 0.489, + 0.552 + ], + "angle": 0, + "content": "Table 16 presents the performance of LRMs on S1-Bench under greedy sampling. While overall accuracy improves compared to top-p sampling, issues of inefficiency and accuracy degradation on simple questions remain." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.563, + 0.485, + 0.579 + ], + "angle": 0, + "content": "C.2 Efficiency Analysis across Subcategories." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.584, + 0.49, + 0.664 + ], + "angle": 0, + "content": "Figure 9 illustrates the average response tokens across the 28 subcategories. In the heatmap, both models (rows) and subcategories (columns) are ordered in descending order according to their average number of response tokens." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.676, + 0.361, + 0.691 + ], + "angle": 0, + "content": "C.3 Solution Analysis Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.696, + 0.49, + 0.858 + ], + "angle": 0, + "content": "For solution analysis, We only use well-formatted thinking processes with correct final answers, as incorrect answers make it unclear whether LRMs are over-reasoning or under-reasoning, and malformed thinking processes cannot be precisely extracted. The segmentation process is performed by DeepSeek-v3, with prompts detailed in Table 17. We compute the average token count in the first solution round; if no solution is found, we use the token count of the entire thinking process." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.869, + 0.414, + 0.884 + ], + "angle": 0, + "content": "C.4 Thinking Redundancy Analysis" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.891, + 0.489, + 0.922 + ], + "angle": 0, + "content": "We conduct a similarity analysis to analyze how information redundancy in the thinking processes" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.295 + ], + "angle": 0, + "content": "changes as reasoning sequences increase. Specifically, we first divide the complete thinking process into k equal-length segments10. Then, we encode each segment using the all-MiniLM-L6-v2 model11. For each segment, we calculate the cosine similarity with all its preceding segments and use the maximum similarity as a measure of its information redundancy. As shown in Figure 10, information redundancy increases across all four main categories as reasoning sequences increase. Sky-T1-32B shows overall lower similarity, which stems from its shorter thinking process, but still demonstrates an upward trend." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.305, + 0.736, + 0.32 + ], + "angle": 0, + "content": "C.5 Error Analysis Details" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.325, + 0.885, + 0.486 + ], + "angle": 0, + "content": "In error analysis, we only use well-formatted samples, as malformed thinking processes cannot be precisely extracted. For samples with correct final answers, we categorize them based on whether the thinking process contains explicit incorrect conclusions in intermediate steps. For samples with incorrect final answers, we categorize them based on whether the correct answer is mentioned at least once during reasoning. We use DeepSeek-v3 for categorization, with prompts provided in Table 18." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.497, + 0.793, + 0.512 + ], + "angle": 0, + "content": "C.6 Gut Moment Analysis Details" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.517, + 0.884, + 0.662 + ], + "angle": 0, + "content": "We prompt GPT-4o to classify the initial part of model responses (before the first '\\n\\ntypes based on its comment on difficulty: easy, neutral, difficult, and no comment. The prompts for english question can be seen in Table 19. For Chinese queries, we use the translated version of the prompt in Chinese. In Table 6, we show the most common sentence of all LRMs in each type of \"gut moment.\"" + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.672, + 0.878, + 0.77 + ], + "angle": 0, + "content": "
TypeSentenceCount
easy-zh这个问题看起来挺简单的308
easy-enthat seems straightforward36
difficult-zh这个问题看起来有点复杂308
difficult-enpercentages can sometimes be tricky7
neutral-zh这个问题看起来好像不难24
neutral-enHmm, interesting3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.78, + 0.883, + 0.809 + ], + "angle": 0, + "content": "Table 6: The most common sentence in each type of \"gut moment.\"" + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.836, + 0.655, + 0.851 + ], + "angle": 0, + "content": "D Error Cases" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.861, + 0.882, + 0.892 + ], + "angle": 0, + "content": "This section presents several error cases observed in LRMs. See Tables 20, 21, 22, 23, 24, and 25." + }, + { + "type": "page_footnote", + "bbox": [ + 0.524, + 0.897, + 0.882, + 0.922 + ], + "angle": 0, + "content": "10We set \\(k = 15\\) , changing its value does not affect the conclusions. 11 https://huggingface.co/sentence-transformers/allMiniLM-L6-v2" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.509, + 0.955 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.118, + 0.219, + 0.88, + 0.741 + ], + "angle": 0, + "content": "
Paper AbbreviationMATHGSM8KAIMEGPQAAMCMMLUOlympiad-BenchSVAMPLiveCode-BenchCommon-SenseQA
Codi (Shen et al., 2025b)
CISC (Taubenfeld et al., 2025)
CoT-Valve (Ma et al., 2025b)
Dast (Shen et al., 2025a)
ATM (Chen et al., 2024a)
DEER (Yang et al., 2025b)
DPTS (Ding et al., 2025)
Dynasor (Fu et al., 2024)
ESC (Li et al., 2024)
Hawkeye (She et al., 2025)
token complexity (Lee et al., 2025)
INFTYTHINK (Yan et al., 2025)
KIMI K1.5 (Team et al., 2025a)
L1 (Aggarwal and Welleck, 2025)
LightThinker (Zhang et al., 2025a)
LS-Mixture SFT (Yu et al., 2025a)
DSC (Wang et al., 2024)
O1-Pruner (Luo et al., 2025)
MRT (Qu et al., 2025b)
Self-Doubt (Fu et al., 2025)
RASC (Wan et al., 2024)
NoThinking (Ma et al., 2025a)
Retro-Search (Lu et al., 2025)
RSD (Liao et al., 2025)
ST-BoN (Wang et al., 2025b)
Elastic Reasoning (Xu et al., 2025b)
FS-BoN (Munkhbat et al., 2025)
SoT (Aytes et al., 2025)
SpecReason (Pan et al., 2025)
Speculative Thinking (Yang et al., 2025d)
SPIRIT (Cui et al., 2025)
ITC Analysis (Wang et al., 2025a)
Think when needed (Yang et al., 2025c)
THINKPRUNE (Hou et al., 2025)
TALE (Han et al., 2024)
TokenSkip (Xia et al., 2025)
TOPS (Yang et al., 2025e)
efficient reasoning (Arora and Zanette, 2025)
TWT (Xu et al., 2025a)
Z1 (Yu et al., 2025b)
Count28242011866555
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.75, + 0.883, + 0.779 + ], + "angle": 0, + "content": "Table 7: A total of 40 studies on LRM efficiency before May 2025 were included. Benchmarks that appeared more than four times are listed." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.131, + 0.172, + 0.864, + 0.804 + ], + "angle": 0, + "content": "
cate.subcategoriesExplanation and cases
reasoning questionnumerical reasoningQuestions that require performing basic mathematical operations or solving simple algebraic equations to arrive at a numerical answer.\nCase: What's two plus three?
code reasoningQuestions that require tracing through and executing simple code snippets to determine their output or behavior when run in a specific programming environment.\nCase: What is the output of the following code when run in Python 3 environment: word = "hello"\\nprint(len(word))
set reasoningQuestions that require applying simple syllogistic reasoning to determine whether elements belong to sets based on clearly stated relationships.\nCase: All squares are quadrilaterals. A shape is a square, is it a quadrilateral?
temporal reasoningQuestions that require calculating time durations, ages, or future dates by applying simple arithmetic operations to temporal information.\nCase: How many minutes equal 120 seconds?
spatial reasoningQuestions that require determining relative positions, directions, or orientations of objects in space based on simple spatial relationships.\nCase: If a bird is flying above a tree, where is the tree in relation to the bird?
causal reasoningQuestions that require determining outcomes by applying simple cause-and-effect relationships based on given conditional statements.\nCase: If ferromagnetic material is placed in a magnetic field, it will become magnetized. An iron nail was placed next to a strong magnet for some time. Has the nail been magnetized?
natural law reasoningQuestions that require applying basic knowledge of physical laws and natural phenomena to predict simple observable outcomes in everyday scenarios.\nCase: Which is faster, an airplane or the propagation of light?
knowledge questiongeometry factsQuestions that require recalling simple and fundamental geometric properties about shapes, angles, and basic geometric figures.\nCase: How many angles does a trapezoid have?
geographic factsQuestions that require recalling simple factual information about locations, landmarks, political divisions, celestial bodies, and other basic geographic knowledge.\nCase: Which is the largest continent on Earth?
historical factsQuestions that require recalling basic facts about historical events.\nCase: Which country first invented paper?
biographical factsQuestions that require recalling basic facts about the identities, achievements, and characteristics of historical figures.\nCase: Who proposed the theory of universal gravitation?
measurement unitsQuestions that require recalling simple conversion relationships between standard units of measurement.\nCase: How many centimeters equal 1 meter?
scientific notationQuestions that require recalling basic scientific symbols, formulas, and standard units used in scientific communication.\nCase: What is the chemical symbol for oxygen?
creative authorshipQuestions that require recalling the creators or originators of notable artistic, literary, musical, and cultural works.\nCase: Who is the author of Hamlet?
" + }, + { + "type": "table_caption", + "bbox": [ + 0.168, + 0.814, + 0.828, + 0.829 + ], + "angle": 0, + "content": "Table 8: The subcategory descriptions and cases of reasoning questions and knowledge questions." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.51, + 0.954 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.116, + 0.178, + 0.88, + 0.798 + ], + "angle": 0, + "content": "
cate.subcatrgoriesExplanation and cases
instruction followingrepetition constraintsQuestions that require outputting specified characters, words, or phrases a specific number of times according to simple formatting instructions.\nCase: Output the number "7" four times, without using separators.
length constraintsQuestions that require generating outputs of a specific length or with a specific number of components based on simple counting constraints.\nCase: Output a four-digit number.
character constraintsQuestions that require generating words or numbers that conform to simple specified character patterns or formatting rules.\nCase: Output a number that begins with 8.
counting constraintsQuestions that require counting specific characters or elements within a given text or sequence.\nCase: Output the number of letter "y" in the word "yes".
transformation constraintsQuestions that require modifying text or numbers according to simple formatting or character substitution rules to produce a transformed output.\nCase: Output the word "good" with all letters capitalized directly.
sentence constraintsQuestions that require generating sentences that conform to simple specified content or structural requirements.\nCase: Give a sentence that contains the phrase "have lunch" directly.
analysis questionsentiment classificationQuestions that require determining whether simple statements express positive or negative emotions based on the tone and word choice.\nCase: Does the sentence "I hate rainy days." express a positive or negative emotion?
named entity recognitionQuestions that require identifying the correct category of named entities (such as people, places, organizations, or time expressions) within simple sentences.\nCase: In the sentence "Napoleon died in 1821", is "1821" a time or a place name?
language classificationQuestions that require identifying the language of origin for simple words or phrases based on their characteristic writing systems or common vocabulary.\nCase: Is the word "hello" English or Japanese?
topic classificationQuestions that require identifying the primary subject matter or thematic category of simple sentences based on their content and context clues.\nCase: Is the topic of the sentence "The stock market rose 2% today" finance or technology?
intent recognitionQuestions that require determining the communicative purpose behind simple utterances or statements based on their phrasing and context.\nCase: Is the intention of the sentence "I'm sorry I'm late." to apologize or to blame?
syntax classificationQuestions that require identifying the correct grammatical structure or sentence type of simple expressions based on their form, punctuation, and communicative function.\nCase: Is "Close the door!" an imperative sentence or an interrogative sentence?
grammar classificationQuestions that require identifying simple grammatical properties (like tense, voice, or polarity) of sentences based on their structure and verb forms.\nCase: Is "The apple was eaten." in active voice or passive voice?
coreference resolutionQuestions that require identifying which entity a pronoun or reference term refers to in simple sentences by tracking relationships between words in the text.\nCase: In "My computer is broken, and I need to fix it." What does "it" refer to?
" + }, + { + "type": "table_caption", + "bbox": [ + 0.14, + 0.808, + 0.856, + 0.823 + ], + "angle": 0, + "content": "Table 9: The subcategory descriptions and cases of instruction following questions and analysis questions." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.163, + 0.411, + 0.176 + ], + "angle": 0, + "content": "Prompt for construction workflow for S1-Bench" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.18, + 0.267, + 0.193 + ], + "angle": 0, + "content": "Data Generation Prompt" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.196, + 0.884, + 0.219 + ], + "angle": 0, + "content": "Generate 50 pairs of questions and answers in both Chinese and English based on the category's name, definition, and specific simplicity criteria. The following conditions must be satisfied:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.218, + 0.672, + 0.229 + ], + "angle": 0, + "content": "1. Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.229, + 0.662, + 0.239 + ], + "angle": 0, + "content": "2. Answers must be unique or easily falsifiable, with no possibility of multiple correct answers." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.239, + 0.373, + 0.25 + ], + "angle": 0, + "content": "3. Make the questions as diverse as possible." 
+ }, + { + "type": "list", + "bbox": [ + 0.117, + 0.218, + 0.672, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.254, + 0.31, + 0.275 + ], + "angle": 0, + "content": "Category Name and Definition: {name_and_defined}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.28, + 0.29, + 0.302 + ], + "angle": 0, + "content": "Specific Simplicity Criteria: {criteria}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.306, + 0.236, + 0.358 + ], + "angle": 0, + "content": "Cases: \n## English question: {question_en} \n## English Answer: {answer_en}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.362, + 0.238, + 0.404 + ], + "angle": 0, + "content": "```c\n## Chinese question:\n{question_zh}\n## Chinese Answer:\n{answer_zh}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.408, + 0.718, + 0.43 + ], + "angle": 0, + "content": "Please generate 50 pairs of Chinese and English questions and answers in the following format: [question]English-question[answer]English-answer[question]Chinese-question[answer]Chinese-answer..." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.434, + 0.214, + 0.446 + ], + "angle": 0, + "content": "Start generating:" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.451, + 0.303, + 0.463 + ], + "angle": 0, + "content": "Quality Discrimination Prompt" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.467, + 0.834, + 0.488 + ], + "angle": 0, + "content": "Given a question, its answer, and its category, please analyze from the following perspectives as comprehensively as possible: 1. Whether the question belongs to the specified category and meet the Specific Simplicity Criteria." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.488, + 0.658, + 0.499 + ], + "angle": 0, + "content": "2. Whether the question is easy, clear, unambiguous, and has an absolutely unique answer." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.499, + 0.609, + 0.508 + ], + "angle": 0, + "content": "3. 
Whether the answer is absolutely correct; if not, what the correct answer should be." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.508, + 0.823, + 0.52 + ], + "angle": 0, + "content": "4. Whether the question is similar to other given questions, and if similar, whether more diverse questions can be generated." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.467, + 0.834, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.524, + 0.31, + 0.545 + ], + "angle": 0, + "content": "Category Name and Definition: {name_and_defined}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.55, + 0.29, + 0.572 + ], + "angle": 0, + "content": "Specific Simplicity Criteria: {criteria}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.576, + 0.26, + 0.597 + ], + "angle": 0, + "content": "Question and Answer: {question_with_answer}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.602, + 0.227, + 0.623 + ], + "angle": 0, + "content": "Other Questions: {questions_list}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.627, + 0.557, + 0.64 + ], + "angle": 0, + "content": "Begin your analysis, aiming to be as detailed and comprehensive as possible:" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.644, + 0.286, + 0.657 + ], + "angle": 0, + "content": "Difficulty Reduction Prompt" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.661, + 0.882, + 0.683 + ], + "angle": 0, + "content": "Given a question and answer that are too complex for the model to answer correctly, you need to further reduce their difficulty while trying to:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.683, + 0.509, + 0.693 + ], + "angle": 0, + "content": "- Ensure the question aligns with the Category Name and Definition." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.693, + 0.46, + 0.703 + ], + "angle": 0, + "content": "- Ensure the question meets the Specific Simplicity Criteria." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.708, + 0.309, + 0.729 + ], + "angle": 0, + "content": "Category Name and Definition: {name_and_defined}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.733, + 0.289, + 0.755 + ], + "angle": 0, + "content": "Specific Simplicity Criteria: {criteria}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.759, + 0.259, + 0.781 + ], + "angle": 0, + "content": "Question and Answer: {question_with_answer}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.785, + 0.292, + 0.797 + ], + "angle": 0, + "content": "The new question and answer:" + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.661, + 0.882, + 0.797 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.811, + 0.882, + 0.84 + ], + "angle": 0, + "content": "Table 10: \"Category Name and Definition\" refers to the subcategory name and its definition, while Specific Simplicity Criteria refers to the simplicity requirements specific to the main category." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.179, + 0.141, + 0.823, + 0.267 + ], + "angle": 0, + "content": "
ModelModel IDURL
Qwen2.5-7BQwen2.5-7B-Instructhttps://huggingface.co/Qwen/Qwen2.5-7B-Instruct
Llama3.1-8BLlama-3.1-8B-Instructhttps://huggingface.co/meta-llama/Llama-3.1-8B-Instruct
Mistral-8BMinistral-8B-Instruct-2410https://huggingface.co/mistralai/Ministral-8B-Instruct-2410
Gemma2-9Bgemma-2-9b-ithttps://huggingface.co/google/gemma-2-9b-it
Qwen2.5-14BQwen2.5-14B-Instructhttps://huggingface.co/Qwen/Qwen2.5-14B-Instruct
Qwen2.5-32BQwen2.5-32B-Instructhttps://huggingface.co/Qwen/Qwen2.5-32B-Instruct
Qwen2.5-72BQwen2.5-72B-Instructhttps://huggingface.co/Qwen/Qwen2.5-72B-Instruct
Llama3.3-70BLlama-3.3-70B-Instructhttps://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
DeepSeek-v3DeepSeek-V3-0324https://huggingface.co/deepseek-ai/DeepSeek-V3-0324
" + }, + { + "type": "table_caption", + "bbox": [ + 0.163, + 0.276, + 0.832, + 0.291 + ], + "angle": 0, + "content": "Table 11: Mapping of LLM abbreviations and IDs used in this paper, with their open-source URLs." + }, + { + "type": "table", + "bbox": [ + 0.134, + 0.417, + 0.865, + 0.835 + ], + "angle": 0, + "content": "
Model IDAbbreviationBase ModelAlg.Size
DeepSeek
DeepSeek-R1-Distill-Qwen-1.5BDS-R1-1.5BQwen2.5-Math-1.5BSFT800K
DeepSeek-R1-Distill-Qwen-7BDS-R1-7BQwen2.5-Math-7BSFT800K
DeepSeek-R1-Distill-Llama-8BDS-R1-8BLlama-3.1-8BSFT800K
DeepSeek-R1-Distill-Qwen-14BDS-R1-14BQwen2.5-14BSFT800K
DeepSeek-R1-Distill-Qwen-32BDS-R1-32BQwen2.5-32BSFT800K
DeepSeek-R1-Distill-Llama-70BDS-R1-70BLlama-3.3-70B-InstructSFT800K
DeepSeek-R1DS-R1DeepSeek-V3-0324SFT&RL800K&-
Qwen
QwQ-32BQwQ-32BQwen2.5-32B--
Qwen3-235B-A22BQwen3-A22BQwen3-235B-A22B-BaseSFT&RL-&-
Qwen3-30B-A3BQwen3-A3BQwen3-30B-A3B-BaseSFT&RL-&-
Qwen3-32BQwen3-32BQwen3-32B-BaseSFT&RL-&-
Qwen3-14BQwen3-14BQwen3-14B-BaseSFT&RL-&-
Qwen3-8BQwen3-8BQwen3-8B-BaseSFT&RL-&-
Qwen3-1.7BQwen3-1.7BQwen3-1.7B-BaseSFT&RL-&-
qihoo360
Light-R1-7B-DSL-R1-7B-DSDeepSeek-R1-Distill-Qwen-7BSFT3K
Light-R1-14B-DSL-R1-14B-DSDeepSeek-R1-Distill-Qwen-14BSFT&RL3K&-
Light-R1-32B-DSL-R1-32B-DSDeepSeek-R1-Distill-Qwen-32BSFT3K
Light-R1-32BL-R1-32BQwen2.5-32B-InstructSFT&DPO73K&-
simplescaling
s1.1-7Bs1.1-7BQwen2.5-7B-InstructSFT1K
s1.1-14Bs1.1-14BQwen2.5-14B-InstructSFT1K
s1.1-32Bs1.1-32BQwen2.5-32B-InstructSFT1K
LG AI Research
EXAONE-Deep-2.4BEXAONE-2.4BEXAONE-3.5-2.4B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-7.8BEXAONE-7.8BEXAONE-3.5-7.8B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-32BEXAONE-32BEXAONE-3.5-32B-InstructSFT&DPO&RL1.6M&20K&10K
NVIDIA
Llama-3.1-Nemotron-Nano-8B-v1Nemotron-8BLlama-3.1-8B-InstructSFT&RL-&-
Llama-3.3-Nemotron-Super-49B-v1Nemotron-49BLlama-3.3-70B-InstructSFT&RL-&-
NovaSky
Sky-T1-32B-FlashSky-T1-32BQwen2.5-32B-InstructSFT&SimPO17K&10K
" + }, + { + "type": "table_caption", + "bbox": [ + 0.276, + 0.844, + 0.721, + 0.858 + ], + "angle": 0, + "content": "Table 12: The open-source LRMs details evaluated for S1-Bench." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.118, + 0.333, + 0.132 + ], + "angle": 0, + "content": "Prompt for Correctness Evaluation" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.141, + 0.264, + 0.153 + ], + "angle": 0, + "content": "Evaluation on S1-Bench" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.163, + 0.204, + 0.175 + ], + "angle": 0, + "content": "**Question:**" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.178, + 0.18, + 0.192 + ], + "angle": 0, + "content": "{question}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.203, + 0.237, + 0.214 + ], + "angle": 0, + "content": "**Model Answer:**" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.218, + 0.215, + 0.23 + ], + "angle": 0, + "content": "{model_answer}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.242, + 0.229, + 0.254 + ], + "angle": 0, + "content": "**Ground Truth:**" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.257, + 0.206, + 0.27 + ], + "angle": 0, + "content": "{groundtruth}" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.282, + 0.474, + 0.294 + ], + "angle": 0, + "content": "Your task is to evaluate whether the model's answer is correct." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.297, + 0.854, + 0.309 + ], + "angle": 0, + "content": "An answer is considered correct as long as it contains the ground truth (regardless of how complex or detailed the description is)." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.312, + 0.882, + 0.334 + ], + "angle": 0, + "content": "If there are parenthetical notes after the ground truth, then there may be multiple correct answers. 
In this case, the given answer is just one example, and any answer that meets the requirements specified in the notes can be considered correct." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.337, + 0.882, + 0.36 + ], + "angle": 0, + "content": "Additionally, some reasonably uncertain supplementary information is also considered appropriate, including more details, possibilities, and expanded discussion. You should focus more on whether the reply contains the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.371, + 0.882, + 0.393 + ], + "angle": 0, + "content": "You need to output a standard JSON, providing your explanation of the evaluation in the \"explain\" field, and giving the evaluation result in the \"result\" field, where 1 means the answer is correct and 0 means it is incorrect." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.397, + 0.521, + 0.409 + ], + "angle": 0, + "content": "Your action should follow the given format: \"explain\": \"\", \"result\": 0/1" + }, + { + "type": "table_caption", + "bbox": [ + 0.344, + 0.425, + 0.651, + 0.438 + ], + "angle": 0, + "content": "Table 13: Prompt for Correctness Evaluation." + }, + { + "type": "table", + "bbox": [ + 0.113, + 0.507, + 0.885, + 0.864 + ], + "angle": 0, + "content": "
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.910.000.000.000.000.000.000.000.000.000.000.09
Qwen3-14B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-8B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-1.7B99.810.000.000.000.000.000.000.000.000.000.000.19
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B99.910.000.090.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B99.240.000.000.000.000.000.000.000.000.000.000.76
DS-R1-1.5B97.580.000.000.000.000.000.000.000.000.000.002.42
Sky-T1-32B95.260.000.620.090.190.000.280.000.003.030.000.52
Nemotron-49B66.0733.930.000.000.000.000.000.000.000.000.000.00
Nemotron-8B58.0626.260.000.000.000.090.000.000.0015.020.000.57
L-R1-32B95.070.000.000.000.000.000.810.000.003.030.001.09
L-R1-32B-DS99.810.000.000.000.000.000.000.000.000.000.000.19
L-R1-14B-DS99.190.000.000.000.000.000.000.000.000.000.000.81
L-R1-7B-DS99.670.000.050.050.000.000.000.000.000.000.000.24
s1.1-32B99.530.000.000.050.000.000.000.000.000.000.000.43
s1.1-14B97.390.000.000.140.000.000.240.000.000.000.002.23
s1.1-7B88.960.000.007.960.090.000.000.000.000.090.002.89
EXAONE-32B67.3932.420.000.000.000.000.000.000.000.000.000.19
EXAONE-7.8B65.8332.230.000.000.050.470.000.000.000.140.001.28
EXAONE-2.4B81.4215.830.000.090.000.050.000.000.000.050.002.56
" + }, + { + "type": "table_caption", + "bbox": [ + 0.328, + 0.873, + 0.667, + 0.888 + ], + "angle": 0, + "content": "Table 14: Format type rates under top-p sampling." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.508, + 0.955 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.309, + 0.884, + 0.665 + ], + "angle": 0, + "content": "
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.760.000.000.000.000.000.000.000.000.000.000.24
Qwen3-14B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-8B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-1.7B99.760.000.000.000.000.000.000.000.000.000.000.24
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B99.760.000.000.000.000.000.000.000.000.000.000.24
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B97.870.000.000.000.000.000.000.000.000.000.002.13
DS-R1-1.5B91.940.000.000.000.000.000.000.000.000.000.008.06
Sky-T1-32B99.290.000.000.000.000.000.000.000.000.470.000.24
Nemotron-49B60.9039.100.000.000.000.000.000.000.000.000.000.00
Nemotron-8B55.2126.780.000.000.000.000.000.000.0016.350.001.66
L-R1-32B85.550.240.000.240.710.240.950.000.006.642.612.84
L-R1-32B-DS99.290.000.000.000.000.000.000.000.000.000.000.71
L-R1-14B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
L-R1-7B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-32B98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-14B95.970.000.000.240.000.000.240.000.000.000.003.55
s1.1-7B87.910.000.006.640.000.000.000.000.000.000.005.45
EXAONE-32B65.8833.890.000.240.000.000.000.000.000.000.000.00
EXAONE-7.8B63.5133.650.000.000.000.240.000.000.000.240.002.37
EXAONE-2.4B78.9115.880.000.000.000.000.000.000.000.000.005.21
" + }, + { + "type": "table_caption", + "bbox": [ + 0.298, + 0.676, + 0.699, + 0.69 + ], + "angle": 0, + "content": "Table 15: Format type rates under greedy decoding setting." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.162, + 0.215, + 0.837, + 0.746 + ], + "angle": 0, + "content": "
ModelSizeacc (Loose)acc (Strict)L-Corr ↑S-Corr ↑Tokens ↓
Qwen3-235B-A22B235B100.00100.00100.00100.00702.70
Qwen3-30B-A3B30B100.00100.00100.00100.00636.35
QwQ-32B32B100.00100.00100.00100.00750.41
Qwen3-32B32B99.7699.7699.7699.76673.62
Qwen3-14B14B99.7699.76100.00100.00597.06
Qwen3-8B8B99.7699.76100.00100.00649.45
Qwen3-1.7B1.7B99.5399.5399.7699.76579.01
Hunyuan-T1-100.00100.00100.00100.00541.09
DS-R1671B100.00100.00100.00100.00621.89
DS-R1-70B70B99.7699.76100.00100.00469.78
DS-R1-32B32B100.00100.00100.00100.00428.46
DS-R1-14B14B99.2999.2999.7699.76463.52
DS-R1-8B8B97.6397.3999.7699.53452.11
DS-R1-7B7B94.3194.3197.8797.87436.87
DS-R1-1.5B1.5B76.5476.5491.9491.94473.67
Sky-T1-32B32B99.5399.0599.7699.29157.12
Nemotron-49B49B99.5399.53100.00100.00337.94
Nemotron-8B8B84.6077.7398.3481.99446.62
L-R1-32B32B92.1885.7894.5585.78996.36
L-R1-32B-DS32B99.2999.2999.2999.29528.45
L-R1-14B-DS14B98.8298.8298.8298.82664.28
L-R1-7B-DS7B92.6592.6598.8298.82514.60
s1.1-32B32B98.8298.8298.8298.82983.38
s1.1-14B14B95.9795.5096.4595.97786.30
s1.1-7B7B94.3187.6894.5587.91630.52
EXAONE-32B32B97.6397.39100.0099.76746.89
EXAONE-7.8B7.8B86.7386.4997.6397.16947.92
EXAONE-2.4B2.4B72.9972.9994.7994.791394.72
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.756, + 0.883, + 0.786 + ], + "angle": 0, + "content": "Table 16: Main results in the greedy decoding setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.148, + 0.104, + 0.85, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.64, + 0.884, + 0.671 + ], + "angle": 0, + "content": "Figure 9: Average response token counts on the 28 subcategories, which is the average result of five generations under top-p sampling." + }, + { + "type": "image", + "bbox": [ + 0.141, + 0.716, + 0.859, + 0.873 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.884, + 0.884, + 0.9 + ], + "angle": 0, + "content": "Figure 10: Maximum similarity between each segment and all preceding segments for LRMs across four categories." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.511, + 0.955 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.115, + 0.335, + 0.127 + ], + "angle": 0, + "content": "Prompts for Solution Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.133, + 0.23, + 0.144 + ], + "angle": 0, + "content": "Task Description:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.144, + 0.601, + 0.154 + ], + "angle": 0, + "content": "Your task is to segment the given Chain of Thought according to the following rules:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.154, + 0.27, + 0.163 + ], + "angle": 0, + "content": "1. 
Segmentation positions:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.163, + 0.884, + 0.194 + ], + "angle": 0, + "content": "(1) Please identify and extract all sub-reasoning processes from the Chain of Thought that meet the following condition: They explicitly arrive at a conclusion (including cases phrased as questions, e.g., \"right?\") that is directly consistent with the Ground Truth. Reasoning processes that only indirectly support the Ground Truth or result in partially aligned conclusions should be excluded." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.194, + 0.882, + 0.214 + ], + "angle": 0, + "content": "(2) After clearly reaching the conclusion consistent with the Ground Truth, insert the segmentation marker to distinctly separate each qualifying sub-reasoning process." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.163, + 0.884, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.214, + 0.245, + 0.224 + ], + "angle": 0, + "content": "2. Output Restriction:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.224, + 0.706, + 0.234 + ], + "angle": 0, + "content": "(1) You should only directly output the segmentation result without adding any additional supplements." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.234, + 0.882, + 0.265 + ], + "angle": 0, + "content": "(2) Except for inserting the separator, you must not make any other modifications to the original Chain of Thought, not even minor character-level changes such as punctuation, spacing, or capitalization. In other words, after removing all separators, the output text must match the original Chain of Thought exactly." 
+ }, + { + "type": "list", + "bbox": [ + 0.116, + 0.224, + 0.882, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.27, + 0.2, + 0.28 + ], + "angle": 0, + "content": "Some cases:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.281, + 0.179, + 0.29 + ], + "angle": 0, + "content": "Case 1:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.29, + 0.2, + 0.299 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.299, + 0.362, + 0.311 + ], + "angle": 0, + "content": "What is the value of \\( \\mathbf{x} \\) if \\( \\mathbf{x} \\) plus 3 equals 8?" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.315, + 0.226, + 0.325 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.326, + 0.127, + 0.336 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.342, + 0.249, + 0.352 + ], + "angle": 0, + "content": "Chain of Thought:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.352, + 0.882, + 0.372 + ], + "angle": 0, + "content": "Okay, so I need to figure out what x is if x plus 3 equals 8. The problem says \"x plus 3 equals 8,\" so in mathematical terms, that would be: \\( x + 3 = 8 \\)" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.372, + 0.882, + 0.403 + ], + "angle": 0, + "content": "Alright, now I need to solve for \\( x \\). From what I remember in algebra, the goal is to isolate \\( x \\) on one side of the equation. That means I need to get rid of that \"+\"3\" that's being added to \\( x \\). To do that, I should perform the opposite operation. Since it's adding 3, the opposite would be subtracting 3." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.403, + 0.673, + 0.413 + ], + "angle": 0, + "content": "So, if I subtract 3 from both sides, that should keep things equal. 
Let me try that: \\( x + 3 - 3 = 8 - 3 \\)" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.413, + 0.882, + 0.432 + ], + "angle": 0, + "content": "Simplifying both sides, on the left side, the \\(+3\\) and \\(-3\\) cancel each other out, leaving just \\(x\\). And on the right side, 8 minus 3 is 5. So that simplifies to: \\(x = 5\\)" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.433, + 0.772, + 0.443 + ], + "angle": 0, + "content": "Wait, let me check if that makes sense. If I substitute \\( \\mathbf{x} \\) back into the original equation, does it hold true? Let's see:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.443, + 0.28, + 0.452 + ], + "angle": 0, + "content": "Original equation: \\( x + 3 = 8 \\)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.452, + 0.271, + 0.462 + ], + "angle": 0, + "content": "Substitute \\( x = 5: 5 + 3 = 8 \\)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.463, + 0.338, + 0.472 + ], + "angle": 0, + "content": "5 plus 3 is indeed 8, so that works out." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.473, + 0.882, + 0.493 + ], + "angle": 0, + "content": "Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. So that's consistent with the first method." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.493, + 0.882, + 0.513 + ], + "angle": 0, + "content": "Wait a second, what if the original problem was written differently? The problem says \"x plus 3 equals 8.\" In symbols, that's unambiguous: \\( x + 3 = 8 \\). So no, I think that's right." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.513, + 0.225, + 0.523 + ], + "angle": 0, + "content": "**Final Answer**" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.524, + 0.239, + 0.539 + ], + "angle": 0, + "content": "The value of \\(x\\) is \\(\\boxed{5}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.543, + 0.187, + 0.553 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.553, + 0.882, + 0.573 + ], + "angle": 0, + "content": "Okay, so I need to figure out what x is if x plus 3 equals 8. The problem says \"x plus 3 equals 8,\" so in mathematical terms, that would be: \\( x + 3 = 8 \\)" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.573, + 0.882, + 0.604 + ], + "angle": 0, + "content": "Alright, now I need to solve for \\( x \\). From what I remember in algebra, the goal is to isolate \\( x \\) on one side of the equation. That means I need to get rid of that \"+\"3\" that's being added to \\( x \\). To do that, I should perform the opposite operation. Since it's adding 3, the opposite would be subtracting 3." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.604, + 0.673, + 0.613 + ], + "angle": 0, + "content": "So, if I subtract 3 from both sides, that should keep things equal. Let me try that: \\( x + 3 - 3 = 8 - 3 \\)" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.613, + 0.882, + 0.634 + ], + "angle": 0, + "content": "Simplifying both sides, on the left side, the \\(+3\\) and \\(-3\\) cancel each other out, leaving just x. And on the right side, 8 minus 3 is 5. So that simplifies to: \\(x = 5 < \\text{split}>\\)" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.634, + 0.772, + 0.644 + ], + "angle": 0, + "content": "Wait, let me check if that makes sense. If I substitute \\( \\mathbf{x} \\) back into the original equation, does it hold true? 
Let's see:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.644, + 0.28, + 0.654 + ], + "angle": 0, + "content": "Original equation: \\( x + 3 = 8 \\)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.654, + 0.27, + 0.663 + ], + "angle": 0, + "content": "Substitute \\( x = 5: 5 + 3 = 8 \\)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.664, + 0.38, + 0.674 + ], + "angle": 0, + "content": "5 plus 3 is indeed 8, so that works out." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.674, + 0.882, + 0.694 + ], + "angle": 0, + "content": "Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. So that's consistent with the first method." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.694, + 0.882, + 0.714 + ], + "angle": 0, + "content": "Wait a second, what if the original problem was written differently? The problem says \"x plus 3 equals 8.\" In symbols, that's unambiguous: \\( x + 3 = 8 \\). So no, I think that's right." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.714, + 0.225, + 0.724 + ], + "angle": 0, + "content": "**Final Answer**" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.724, + 0.283, + 0.74 + ], + "angle": 0, + "content": "The value of x is 5. 
" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.744, + 0.3, + 0.755 + ], + "angle": 0, + "content": "...Other examples are omitted.)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.76, + 0.198, + 0.77 + ], + "angle": 0, + "content": "Eval Target:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.77, + 0.192, + 0.78 + ], + "angle": 0, + "content": "## Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.781, + 0.18, + 0.792 + ], + "angle": 0, + "content": "{question}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.796, + 0.219, + 0.806 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.806, + 0.206, + 0.818 + ], + "angle": 0, + "content": "{groundtruth}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.822, + 0.242, + 0.833 + ], + "angle": 0, + "content": "Chain of Thought:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.833, + 0.228, + 0.844 + ], + "angle": 0, + "content": "{thinking_process}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.848, + 0.18, + 0.859 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "table_caption", + "bbox": [ + 0.342, + 0.874, + 0.655, + 0.888 + ], + "angle": 0, + "content": "Table 17: Prompts for Solution Segmentation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.509, + 0.955 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.199, + 0.287, + 0.212 + ], + "angle": 0, + "content": "Prompts for Error Analysis" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.216, + 0.414, + 0.229 + ], + "angle": 0, + "content": "Prompts for samples whose final answer is correct" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.233, + 0.231, + 0.245 + ], + "angle": 0, + "content": "Task Description:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.244, + 0.867, + 0.266 + ], + "angle": 0, + "content": "You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.269, + 0.195, + 0.281 + ], + "angle": 0, + "content": "Categories:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.281, + 0.633, + 0.291 + ], + "angle": 0, + "content": "1: The CoT ***includes explicit incorrect conclusions*** in intermediate reasoning steps." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.291, + 0.692, + 0.302 + ], + "angle": 0, + "content": "2: The CoT ***doesn't include any explicit incorrect conclusion*** in intermediate reasoning steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.305, + 0.39, + 0.318 + ], + "angle": 0, + "content": "Output your evaluation in the following format:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.321, + 0.197, + 0.332 + ], + "angle": 0, + "content": "TheReason:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.332, + 0.737, + 0.342 + ], + "angle": 0, + "content": "[note: Conduct a step-by-step analysis, stating if and where explicit incorrect conclusions occur in the COT.]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.346, + 0.19, + 0.357 + ], + "angle": 0, + "content": "ErrorType:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.357, + 0.884, + 0.398 + ], + "angle": 0, + "content": "[note: Summarize each incorrect conclusion into a specific error type using a phrase of less than 5 words, such as factual inaccuracies, logical fallacies, comprehension mistakes, calculation errors, formatting issues, and so forth, to better conduct further evaluation and analysis. Directly output a Python list, where each element represents the error type of a specific incorrect conclusion in the CoT. If there are no incorrect conclusions, return an empty list.]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.401, + 0.207, + 0.413 + ], + "angle": 0, + "content": "TheCategory:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.411, + 0.821, + 0.423 + ], + "angle": 0, + "content": "[note: Provide your classification based on your analysis using only the number \"1\" or \"2\". 
Do not add any additional text.]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.426, + 0.186, + 0.437 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.437, + 0.18, + 0.449 + ], + "angle": 0, + "content": "{question}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.452, + 0.212, + 0.462 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.463, + 0.201, + 0.475 + ], + "angle": 0, + "content": "{groundtruth}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.478, + 0.161, + 0.488 + ], + "angle": 0, + "content": "COT:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.488, + 0.228, + 0.5 + ], + "angle": 0, + "content": "{thinking_process}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.504, + 0.198, + 0.515 + ], + "angle": 0, + "content": "TheReason:" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.521, + 0.425, + 0.533 + ], + "angle": 0, + "content": "Prompts for samples whose final answer is incorrect" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.537, + 0.231, + 0.549 + ], + "angle": 0, + "content": "Task Description:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.549, + 0.867, + 0.569 + ], + "angle": 0, + "content": "You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.574, + 0.195, + 0.585 + ], + "angle": 0, + "content": "Categories:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.584, + 0.882, + 0.605 + ], + "angle": 0, + "content": "1: Regardless of whether the CoT ultimately arrives at the correct final answer or not, ***the correct answer is explicitly mentioned at least once*** within the reasoning steps (even if it is not ultimately adopted)." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.605, + 0.759, + 0.616 + ], + "angle": 0, + "content": "2: ***The correct answer is never explicitly mentioned or referenced*** at any point within the reasoning steps." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.619, + 0.39, + 0.632 + ], + "angle": 0, + "content": "Output your evaluation in the following format:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.636, + 0.197, + 0.647 + ], + "angle": 0, + "content": "TheReason:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.646, + 0.882, + 0.657 + ], + "angle": 0, + "content": "[note: Conduct a step-by-step analysis, explicitly stating whether and where a correct answer is mentioned within the reasoning steps.]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.661, + 0.207, + 0.672 + ], + "angle": 0, + "content": "TheCategory:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.671, + 0.821, + 0.682 + ], + "angle": 0, + "content": "[note: Provide your classification based on your analysis using only the number \"1\" or \"2\". 
Do not add any additional text.]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.686, + 0.186, + 0.697 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.697, + 0.18, + 0.708 + ], + "angle": 0, + "content": "{question}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.712, + 0.212, + 0.722 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.722, + 0.171, + 0.733 + ], + "angle": 0, + "content": "{answer}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.737, + 0.161, + 0.747 + ], + "angle": 0, + "content": "COT:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.748, + 0.208, + 0.759 + ], + "angle": 0, + "content": "{thinking_part}" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.763, + 0.198, + 0.774 + ], + "angle": 0, + "content": "TheReason:" + }, + { + "type": "table_caption", + "bbox": [ + 0.37, + 0.79, + 0.627, + 0.805 + ], + "angle": 0, + "content": "Table 18: Prompts for Error Analysis." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.094, + 0.486, + 0.106 + ], + "angle": 0, + "content": "Prompts for classify the \"gut moment\" for English questions" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.111, + 0.273, + 0.122 + ], + "angle": 0, + "content": "Overall Task Description" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.122, + 0.882, + 0.142 + ], + "angle": 0, + "content": "You will be given the beginning portion of a response written by a large language model when answering a question. Your task is to classify the response into one of the following four categories based on the initial comment about **the difficulty of the question**." 
+ }, + { + "type": "text", + "bbox": [ + 0.116, + 0.142, + 0.882, + 0.162 + ], + "angle": 0, + "content": "Important: Only consider the \\(^{**}\\) initial comment \\(^{**}\\) on difficulty made in the response. If the model later changes its assessment, please ignore those later revisions—focus solely on the first difficulty-related comment." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.162, + 0.537, + 0.172 + ], + "angle": 0, + "content": "You must assign the response to exactly one of the four categories below:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.172, + 0.884, + 0.192 + ], + "angle": 0, + "content": "Category 1: Initial comment indicates the question is easy. Initial comment includes phrases like \"simple,\" \"basic,\" \"straightforward,\" \"common,\" etc., clearly stating the question is easy." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.192, + 0.882, + 0.212 + ], + "angle": 0, + "content": "Category 2: Initial comment indicates the question is difficult: Initial comment includes phrases like \"a bit complex,\" \"somewhat difficult,\" \"challenging,\" \"tricky,\" etc., clearly stating the question is hard." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.212, + 0.884, + 0.232 + ], + "angle": 0, + "content": "Category 3: Initial comment is neutral or suggests moderate difficulty: Initial comment includes phrases like \"interesting,\" \"not hard,\" \"not complex,\" \"fun,\" etc., without a clear indication of difficulty level." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.232, + 0.646, + 0.243 + ], + "angle": 0, + "content": "Category 4: No comment on difficulty: The response contains no mention of difficulty at all." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.248, + 0.4, + 0.258 + ], + "angle": 0, + "content": "Please output your result in the following format:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.258, + 0.745, + 0.269 + ], + "angle": 0, + "content": "[Explanation of classification][Category (just the number)][Short phrase containing the comment]" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.269, + 0.722, + 0.279 + ], + "angle": 0, + "content": "If it falls into Category 4 (no comment), write \"None\" for the [Short phrase containing the comment] part." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.283, + 0.412, + 0.295 + ], + "angle": 0, + "content": "Examples for each category will be shown below." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.299, + 0.263, + 0.311 + ], + "angle": 0, + "content": "## Category 1 Examples:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.316, + 0.174, + 0.325 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.326, + 0.254, + 0.337 + ], + "angle": 0, + "content": "What is two plus three?" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.342, + 0.177, + 0.351 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.352, + 0.882, + 0.383 + ], + "angle": 0, + "content": "Hmm, the user is asking what two plus three equals. This question seems very simple, though it might be a bit confusing for someone just starting to learn math. First, I need to figure out what the user's intent is. They might be testing my basic calculation ability or genuinely need help solving the problem." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.388, + 0.162, + 0.398 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.398, + 0.724, + 0.41 + ], + "angle": 0, + "content": "The initial comment describes the question as \"very simple\"1This question seems very simple" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.414, + 0.3, + 0.424 + ], + "angle": 0, + "content": "...(Other examples are omitted.)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.429, + 0.263, + 0.44 + ], + "angle": 0, + "content": "## Category 2 Examples:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.446, + 0.174, + 0.455 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.456, + 0.387, + 0.465 + ], + "angle": 0, + "content": "What's the next floor after basement level two?" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.472, + 0.177, + 0.482 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.482, + 0.882, + 0.513 + ], + "angle": 0, + "content": "Hmm, this question seems a bit complex, but I'll try to understand it first. The question is \"What's the next floor after basement level two?\" First, I need to clarify what \"basement level two\" means. Usually, the first basement is the deepest, and then going up you have ground floor, first floor, and so on." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.518, + 0.162, + 0.528 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.528, + 0.65, + 0.539 + ], + "angle": 0, + "content": "The initial comment says it's \"a bit complex\" \\(<\\text{sep}>2\\) \\(<\\text{sep}>\\) This question seems a bit complex" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.543, + 0.3, + 0.554 + ], + "angle": 0, + "content": "...Other examples are omitted.)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.559, + 0.263, + 0.57 + ], + "angle": 0, + "content": "## Category 3 Examples:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.571, + 0.174, + 0.579 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.579, + 0.75, + 0.59 + ], + "angle": 0, + "content": "\"Doctors recommend drinking at least eight glasses of water a day.\" Is this sentence about health or education?" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.595, + 0.177, + 0.605 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.605, + 0.882, + 0.626 + ], + "angle": 0, + "content": "Hmm, this question seems not complicated, though if you think carefully it might be a bit tricky. First, I need to understand the meaning of the sentence \"Doctors recommend drinking at least eight glasses of water a day.\"" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.631, + 0.162, + 0.64 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.641, + 0.882, + 0.662 + ], + "angle": 0, + "content": "The initial comment says it's \"not complicated,\" even though it later mentions it might be tricky. 
Classification should follow the initial comment3This question doesn't seem complicated" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.666, + 0.3, + 0.677 + ], + "angle": 0, + "content": "...Other examples are omitted.)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.682, + 0.263, + 0.693 + ], + "angle": 0, + "content": "## Category 4 Examples:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.698, + 0.174, + 0.708 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.709, + 0.546, + 0.72 + ], + "angle": 0, + "content": "All birds have feathers. A sparrow is a bird. Does a sparrow have feathers?" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.725, + 0.177, + 0.734 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.735, + 0.882, + 0.756 + ], + "angle": 0, + "content": "Hmm, the user is asking a question about birds and feathers—specifically, that all birds have feathers, a sparrow is a bird, so does the sparrow have feathers? This looks like a logic reasoning question. The user might be learning the basics of logic or biology." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.761, + 0.162, + 0.77 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.771, + 0.813, + 0.782 + ], + "angle": 0, + "content": "Although the response mentions it's a \"logic reasoning question,\" it contains no evaluation of difficulty4None" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.786, + 0.3, + 0.797 + ], + "angle": 0, + "content": "...Other examples are omitted.)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.802, + 0.335, + 0.813 + ], + "angle": 0, + "content": "Here is the one you need to classify." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.818, + 0.174, + 0.827 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.829, + 0.167, + 0.839 + ], + "angle": 0, + "content": "question" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.844, + 0.177, + 0.853 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.855, + 0.168, + 0.865 + ], + "angle": 0, + "content": "response" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.87, + 0.162, + 0.881 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "table_caption", + "bbox": [ + 0.251, + 0.896, + 0.746, + 0.91 + ], + "angle": 0, + "content": "Table 19: Prompts for classifying the \"gut moment\" in English questions." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.943, + 0.509, + 0.954 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.362, + 0.566, + 0.376 + ], + "angle": 0, + "content": "Categories: instruction following Model: DeepSeek-R1-Distill-Qwen-7B" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.386, + 0.178, + 0.397 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.4, + 0.46, + 0.412 + ], + "angle": 0, + "content": "Output the number \"7\" four times, without using separators." 
+ }, + { + "type": "title", + "bbox": [ + 0.117, + 0.414, + 0.21, + 0.425 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.429, + 0.148, + 0.439 + ], + "angle": 0, + "content": "7777" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.443, + 0.226, + 0.455 + ], + "angle": 0, + "content": "Thinking process:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.457, + 0.882, + 0.479 + ], + "angle": 0, + "content": "To solve the problem of outputting the number \"7\" four times without using separators, I need to find a way to repeat the digit 7 multiple times without relying on concatenation or other connecting characters." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.479, + 0.882, + 0.499 + ], + "angle": 0, + "content": "One approach is to use mathematical operations that inherently produce the number 7. Addition is a simple and effective operation that can be used to achieve this. By adding 7 to itself multiple times, I can create the desired sequence of numbers." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.499, + 0.882, + 0.519 + ], + "angle": 0, + "content": "Specifically, adding 7 four times will give me \\( 7 + 7 + 7 + 7 \\), which equals 28. This method ensures that the number 7 is used exactly four times without the need for any separators." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.519, + 0.744, + 0.53 + ], + "angle": 0, + "content": "Therefore, the solution is to use the addition operation to repeat the number 7 four times and output the result." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.532, + 0.203, + 0.543 + ], + "angle": 0, + "content": "Final Answer:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.547, + 0.882, + 0.568 + ], + "angle": 0, + "content": "To output the number \"7\" four times without using separators, you can utilize mathematical operations that inherently produce the number 7 multiple times. 
One straightforward way is to use addition:" + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.57, + 0.232, + 0.582 + ], + "angle": 0, + "content": "\\[\n[ 7 + 7 + 7 + 7 = 2 8 ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.584, + 0.807, + 0.596 + ], + "angle": 0, + "content": "This expression shows that the number \"7\" is used four times in a row without any separators, resulting in the total of 28." + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.599, + 0.22, + 0.615 + ], + "angle": 0, + "content": "\\[\n[ \\overline {{7 + 7 + 7 + 7}} ]\n\\]" + }, + { + "type": "table_caption", + "bbox": [ + 0.392, + 0.63, + 0.604, + 0.642 + ], + "angle": 0, + "content": "Table 20: Error Case for LRM." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.509, + 0.955 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.148, + 0.457, + 0.161 + ], + "angle": 0, + "content": "Categories: instruction following Model: hunyuan-T1" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.171, + 0.178, + 0.182 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.184, + 0.404, + 0.196 + ], + "angle": 0, + "content": "输出一个动物的英文单词,并且只有三个字母" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.198, + 0.553, + 0.21 + ], + "angle": 0, + "content": "Output an English word for an animal, and it must be only three letters long." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.212, + 0.209, + 0.223 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.225, + 0.47, + 0.237 + ], + "angle": 0, + "content": "任何一个由三个字母组成的动物名称都可以(例如:cat)" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.239, + 0.504, + 0.251 + ], + "angle": 0, + "content": "Any animal name consisting of three letters is acceptable (e.g., cat)." 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.253, + 0.226, + 0.265 + ], + "angle": 0, + "content": "Thinking process:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.267, + 0.882, + 0.309 + ], + "angle": 0, + "content": "嗯,用户让我输出一个动物的英文单词,而且必须是三个字母的。首先,我得确定有哪些常见的动物是三个字母的。可能想到的有猫(Cat)、狗(Dog),不过这两个都是三个字母吗?Cat是C-A-T,确实是三个字母,Dog也是D-O-G,三个字母。那这两个应该都可以。不过用户可能需要更多的例子,或者只需要一个?题目里说的是“输出一个”,所以可能只需要一个。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.311, + 0.881, + 0.346 + ], + "angle": 0, + "content": "不过等等,可能还有其他选项,比如Ant(蚂蚁)、Ape(猿)、Bat(蝙蝠)、Bear(熊)是四个字母,所以不行。Fish是四个字母,Fox是三个字母,F-O-X,对,Fox也是三个字母。还有Lion是四个字母,不行。Tiger也是五个字母。Rat是三个字母,R-A-T。还有Sheep是五个字母,Cow是三个字母,C-O-W。所以可能的选择还挺多的。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.348, + 0.881, + 0.371 + ], + "angle": 0, + "content": "不过用户可能想要的是最常见的,比如Cat、Dog、Fox、Ant、Rat、Cow这些。需要确认这些单词是否都是正确的动物名称。比如Ant是蚂蚁,没错。Fox是狐狸,Rat是老鼠,Cow是奶牛。这些都是正确的。所以可能需要从这些里面选一个。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.373, + 0.881, + 0.396 + ], + "angle": 0, + "content": "不过用户可能希望答案只有一个,所以可能需要随机选一个,或者选最典型的。比如Cat和Dog是最常见的宠物,可能更容易被想到。但Fox也是一个不错的选择,可能更有趣一些。或者像Ape,虽然也是三个字母,但可能不如其他常见。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.398, + 0.875, + 0.42 + ], + "angle": 0, + "content": "另外,用户有没有可能有其他需求?比如是否区分大小写?题目里说是英文单词,通常首字母不大写,除非是专有名词。所以应该用小写,比如cat、dog。不过输出的时候可能需要大写首字母,但题目没有特别说明,可能保持小写即可。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.422, + 0.881, + 0.465 + ], + "angle": 0, + "content": "再检查一下有没有拼写错误,比如Bat是B-A-T,没错。Rat是R-A-T,正确。Cow是C-O-W,没错。所以这些都可以作为候选答案。现在需要从中选一个作为回答。可能用户希望得到多个例子,但题目要求输出一个,所以可能需要选最典型的。比如Cat或者Dog,因为这两个是最常见的宠物,可能更容易被用户想到。不过也有可能用户希望有变化,比如Fox这样的动物,比较有趣。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.467, + 0.881, + 0.49 + ], + "angle": 0, + "content": "或者,用户可能是在测试知识库,看看是否知道除了Cat和Dog之外的其他三个字母的动物。比如Ant,虽然蚂蚁很小,但确实是动物。或者Rat,虽然老鼠可能不太受欢迎,但确实属于动物。Cow是家畜,可能也是合理的选择。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.491, + 0.873, + 0.526 + ], + "angle": 0, + "content": 
"所以,综合考虑,可能需要给出多个例子,但题目要求输出一个。这时候可能需要选择其中一个,比如Cat,或者Dog,或者Fox。例如,回答“Cat”或者“Dog”都是正确的。不过为了多样性,可能选Fox,因为不是所有人都会立刻想到它,而Cat和Dog比较基础。或者用户可能希望得到不同的答案,所以可以选其中一个,比如“Fox”作为回答。" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.529, + 0.882, + 0.571 + ], + "angle": 0, + "content": "Well, the user asked me to output an English word for an animal, and it must be three letters long. First, I need to figure out which common animals have three letters. I can think of Cat and Dog, but are both of these three letters? Cat is C-A-T, and it is indeed three letters. Dog is D-O-G, also three letters. So both of these should be valid. But the user might need more examples, or just one? The task says \"output one,\" so it might only require one." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.571, + 0.882, + 0.601 + ], + "angle": 0, + "content": "But wait, there could be other options, like Ant, Ape, Bat, and Bear, but Bear has four letters, so it doesn't count. Fish has four letters, too. Fox is three letters, F-O-X, yes, Fox is three letters. Lion has four letters, so that doesn't work. Tiger has five letters. Rat is three letters, R-A-T. Sheep has five letters. Cow is three letters, C-O-W. So there are quite a few possibilities." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.602, + 0.882, + 0.631 + ], + "angle": 0, + "content": "However, the user might be looking for the most common ones, like Cat, Dog, Fox, Ant, Rat, and Cow. I need to check if these words are all correct animal names. For example, Ant is an ant, that's right. Fox is a fox, Rat is a rat, Cow is a cow. These are all correct. So we might need to choose one from these." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.632, + 0.882, + 0.661 + ], + "angle": 0, + "content": "But the user may want only one answer, so I might need to pick randomly or choose the most typical one. For instance, Cat and Dog are the most common pets, so they might be easier to think of. 
But Fox is also a good choice and might be more interesting. Or something like Ape, though it's three letters, might not be as common as the others." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.661, + 0.882, + 0.692 + ], + "angle": 0, + "content": "Also, is there any other requirement from the user? For instance, should I differentiate between uppercase and lowercase? The task says it's an English word, and usually, the first letter isn't capitalized unless it's a proper noun. So, it should be in lowercase, like cat, dog. However, when outputting, the first letter might need to be capitalized, but the task doesn't specify this, so lowercase should be fine." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.692, + 0.882, + 0.732 + ], + "angle": 0, + "content": "Let's double-check for spelling errors: Bat is B-A-T, correct. Rat is R-A-T, correct. Cow is C-O-W, correct. So these can all be candidate answers. Now, I need to choose one as the response. The user might want several examples, but the task asks for only one, so I might need to pick the most typical one. For example, Cat or Dog, since these are the most common pets and might be easier for the user to think of. But Fox is also a good choice, as it's more interesting." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.733, + 0.882, + 0.762 + ], + "angle": 0, + "content": "Or the user might be testing the knowledge base to see if it knows other three-letter animals besides Cat and Dog. For example, Ant, although ants are small, they are indeed animals. Or Rat, although rats might not be as popular, they are still animals. Cow is livestock, so that could also be a reasonable choice." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.763, + 0.882, + 0.802 + ], + "angle": 0, + "content": "So, taking everything into account, I might need to give multiple examples, but the task requires one. In that case, I could choose one, like Cat, or Dog, or Fox. 
For instance, answering \"Cat\" or \"Dog\" would both be correct. However, for diversity, I might pick Fox, because not everyone will immediately think of it, whereas Cat and Dog are more basic. Or the user might want a different answer, so I could choose one of them, like \"Fox.\"" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.803, + 0.206, + 0.812 + ], + "angle": 0, + "content": "Final Answer:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.816, + 0.176, + 0.826 + ], + "angle": 0, + "content": "**Wolf**" + }, + { + "type": "table_caption", + "bbox": [ + 0.392, + 0.844, + 0.603, + 0.857 + ], + "angle": 0, + "content": "Table 21: Error Case for LRM." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.509, + 0.955 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.128, + 0.56, + 0.142 + ], + "angle": 0, + "content": "Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.152, + 0.177, + 0.163 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.166, + 0.404, + 0.178 + ], + "angle": 0, + "content": "In which city is the Sydney Opera House located?" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.18, + 0.21, + 0.191 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.195, + 0.163, + 0.207 + ], + "angle": 0, + "content": "Sydney." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.209, + 0.226, + 0.222 + ], + "angle": 0, + "content": "Thinking process:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.224, + 0.884, + 0.326 + ], + "angle": 0, + "content": "Okay, so I need to figure out which city the Sydney Opera House is in. I know that Australia has a lot of cities, but I'm pretty sure the Opera House isn't in Sydney. Wait, isn't it in a different place? 
I think I've heard of a place called Petersham before, maybe that's where it's located. I remember seeing pictures of it in the news once, and they mentioned Petersham as the area where it's situated. But I'm not entirely sure if it's just a suburb or a major city. Maybe I should think about other major cities in Australia. There's Melbourne, which is known as the \"Melbourne\" city, and then there's Brisbane, which is more on the northern side. Then there's Sydney, which is the capital, but I don't think the Opera House is there. I think it's in a regional area instead of the capital city. So, putting it all together, I'm pretty confident that the Sydney Opera House is located in Petersham, which is a suburb in New South Wales, Australia. But I should double-check to make sure I'm not mixing up the names of places or cities. Maybe I can recall any famous landmarks in Petersham. I think there's a big shopping center there called the Petersham Shopping Centre, and I've heard that the Opera House is nearby. Yeah, that makes sense. So, I'm going to go with Petersham as the location." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.328, + 0.204, + 0.339 + ], + "angle": 0, + "content": "Final Answer:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.343, + 0.638, + 0.355 + ], + "angle": 0, + "content": "The Sydney Opera House is located in Petersham, a suburb in New South Wales, Australia." + }, + { + "type": "table_caption", + "bbox": [ + 0.392, + 0.371, + 0.604, + 0.384 + ], + "angle": 0, + "content": "Table 22: Error Case for LRM." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.48, + 0.56, + 0.493 + ], + "angle": 0, + "content": "Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.503, + 0.177, + 0.515 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.517, + 0.449, + 0.529 + ], + "angle": 0, + "content": "Who composed *Symphony No. 
9 in D minor, Op. 125*?" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.531, + 0.209, + 0.543 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.546, + 0.251, + 0.558 + ], + "angle": 0, + "content": "Ludwig van Beethoven" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.56, + 0.226, + 0.572 + ], + "angle": 0, + "content": "Thinking process:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.575, + 0.884, + 0.606 + ], + "angle": 0, + "content": "Alright, so I need to figure out who composed Symphony No. 9 in D minor, Op. 125. Let me start by recalling what I know about classical music composers. I know that there are several famous composers, but I'm trying to remember if it's someone like Beethoven, Mozart, or perhaps someone from a later era." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.606, + 0.882, + 0.627 + ], + "angle": 0, + "content": "Wait, Beethoven wrote many symphonies, but I think he didn't write Symphony No. 9 in D minor. Let me think. Beethoven's 9th symphony is actually in E flat minor, Op. 67, right? So that's not it. Then there's Beethoven's Moonlight Sonata, which is different." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.627, + 0.882, + 0.657 + ], + "angle": 0, + "content": "Mozart composed a lot of piano concertos, but I don't think he wrote a symphony in D minor. His compositions are usually in various key signatures, but D minor isn't one I associate with him. Then there's Schubert, who wrote the famous symphonies, but I don't recall him composing a symphony in D minor either." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.657, + 0.882, + 0.677 + ], + "angle": 0, + "content": "Wait, maybe it's someone else. I've heard of Igor Stravinsky before, but he was more known for his operas like The Rite of Spring. Then there's Paul Simon, but he's a contemporary composer, so probably not." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.677, + 0.882, + 0.697 + ], + "angle": 0, + "content": "Oh, maybe it's someone from the classical era. Let me think about other composers. There's Shostakovich, but I don't think he wrote that. What about someone like Tchaikovsky? No, he wrote symphonies in C minor and F major, but not D minor." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.697, + 0.882, + 0.727 + ], + "angle": 0, + "content": "Hmm, maybe it's someone more obscure. I think I've heard of a composer named Igor Kovalsky. Wait, was that the composer of this symphony? I'm not entirely sure, but I think he's associated with writing complex symphonies. Alternatively, could it be someone like Shoenfield? I'm not certain." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.727, + 0.882, + 0.757 + ], + "angle": 0, + "content": "Wait, I think I remember that Igor Kovalsky wrote this symphony. It's sometimes referred to as \"The New World\" because of its emotional depth and complexity. The symphony is in D minor, Op. 125, and it's known for its intense, dramatic sound. I think that's correct, but I should double-check to make sure it's not another composer." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.757, + 0.882, + 0.787 + ], + "angle": 0, + "content": "Another thought: maybe it's someone from the 19th century. I'm trying to recall if any composers from that era wrote such a symphony. I'm not sure, but I think Igor Kovalsky is the one. He was an American composer, so that makes sense as a modern touch in classical music." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.787, + 0.882, + 0.808 + ], + "angle": 0, + "content": "So, putting it all together, I believe the composer is Igor Kovalsky, and the symphony is known for its dramatic and intense sound. I think that's the correct answer, but I should confirm if there's any other possibility I might be forgetting." 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.81, + 0.206, + 0.822 + ], + "angle": 0, + "content": "Final Answer:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.825, + 0.882, + 0.846 + ], + "angle": 0, + "content": "The composer of Symphony No. 9 in D minor, Op. 125, is Igor Kovalsky. This symphony is often recognized for its dramatic and intense sound, sometimes referred to as \"The New World.\"" + }, + { + "type": "table_caption", + "bbox": [ + 0.391, + 0.863, + 0.603, + 0.876 + ], + "angle": 0, + "content": "Table 23: Error Case for LRM." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.097, + 0.415, + 0.111 + ], + "angle": 0, + "content": "Categories: reasoning question Model: s1.1-7B" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.121, + 0.178, + 0.132 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.135, + 0.219, + 0.146 + ], + "angle": 0, + "content": "1.5加3.5等于几?" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.148, + 0.239, + 0.159 + ], + "angle": 0, + "content": "What is 1.5 plus 3.5?" 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.162, + 0.21, + 0.173 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.177, + 0.127, + 0.187 + ], + "angle": 0, + "content": "5" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.191, + 0.225, + 0.202 + ], + "angle": 0, + "content": "Thinking process:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.204, + 0.816, + 0.216 + ], + "angle": 0, + "content": "好的,我需要计算1.5加3.5等于多少。首先,这两个数都是小数,处理起来相对简单。我可以在心里快速算一下。" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.217, + 0.88, + 0.239 + ], + "angle": 0, + "content": "首先,1.5加上3.5。可以将它们看成是 \\(1 + 0.5 + 3 + 0.5\\) 。这样就可以先将整数部分相加,再将小数部分相加。 \\(1 + 3 = 4\\) , \\(0.5 + 0.5 = 1\\) 。然后把两部分加起来是 \\(4 + 1 = 5\\) 。所以答案是5。" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.24, + 0.88, + 0.261 + ], + "angle": 0, + "content": "另一种方法是将它们视为货币金额。比如,1.5美元加3.5美元。1.5美元加上3美元是4.5美元,再加上0.5美元就是5美元。这也得到同样的结果。" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.262, + 0.88, + 0.283 + ], + "angle": 0, + "content": "当然,也可以用电子计算器来验证,但我认为手动计算更快。不过,为了确保准确,我再检查一遍。1.5加3.5。10加35等于45,然后小数点一位,所以是4.5。对的,\\(1.5 + 3.5 = 4.5\\)。或者用竖式计算也可以:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.286, + 0.137, + 0.295 + ], + "angle": 0, + "content": "1.5" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.296, + 0.143, + 0.306 + ], + "angle": 0, + "content": "+3.5" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.312, + 0.156, + 0.325 + ], + "angle": 0, + "content": "4.5" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.326, + 0.603, + 0.336 + ], + "angle": 0, + "content": "对,这样一看也确实是4.5。所以答案应该是4.5。我之前得到的结果是正确的。" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.338, + 0.882, + 0.359 + ], + "angle": 0, + "content": "Alright, I need to calculate what 1.5 plus 3.5 equals. First, both of these numbers are decimals, so it's relatively simple to handle. I can quickly calculate it in my head." 
+ }, + { + "type": "text", + "bbox": [ + 0.116, + 0.36, + 0.882, + 0.379 + ], + "angle": 0, + "content": "First, 1.5 plus 3.5. You can think of them as \\( 1 + 0.5 + 3 + 0.5 \\). This way, I can add the integer parts first, then the decimal parts. \\( 1 + 3 = 4 \\), and \\( 0.5 + 0.5 = 1 \\). Then, adding the two parts together gives \\( 4 + 1 = 5 \\). So, the answer is 5." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.38, + 0.882, + 0.399 + ], + "angle": 0, + "content": "Another way is to treat them as currency amounts. For example, 1.5 dollars plus 3.5 dollars. 1.5 dollars plus 3 dollars equals 4.5 dollars, then adding 0.5 dollars gives 5 dollars. This also leads to the same result." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.4, + 0.882, + 0.429 + ], + "angle": 0, + "content": "Of course, you could use a calculator to verify, but I think doing it manually is quicker. However, to ensure accuracy, I will check again. 1.5 plus 3.5. 10 plus 35 equals 45, then move the decimal point one place, so it's 4.5. Yes, \\(1.5 + 3.5 = 4.5\\). Or you could use column addition:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.43, + 0.142, + 0.439 + ], + "angle": 0, + "content": "1.5" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.44, + 0.143, + 0.449 + ], + "angle": 0, + "content": "+3.5" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.45, + 0.155, + 0.457 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.459, + 0.137, + 0.469 + ], + "angle": 0, + "content": "4.5" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.47, + 0.767, + 0.48 + ], + "angle": 0, + "content": "Yes, looking at it this way, it indeed adds up to 4.5. So the answer should be 4.5. The result I got earlier is correct." 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.482, + 0.204, + 0.493 + ], + "angle": 0, + "content": "Final Answer:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.496, + 0.194, + 0.511 + ], + "angle": 0, + "content": "答案是 4.5" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.516, + 0.231, + 0.53 + ], + "angle": 0, + "content": "The answer is 4.5" + }, + { + "type": "table_caption", + "bbox": [ + 0.392, + 0.545, + 0.604, + 0.557 + ], + "angle": 0, + "content": "Table 24: Error Case for LRM." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.592, + 0.554, + 0.605 + ], + "angle": 0, + "content": "Categories: analysis question Model: DeepSeek-R1-Distill-Qwen-1.5B" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.615, + 0.178, + 0.626 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.629, + 0.548, + 0.641 + ], + "angle": 0, + "content": "In the sentence \"Napoleon died in 1821\", is \"1821\" a time or a place name?" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.643, + 0.21, + 0.655 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.658, + 0.152, + 0.669 + ], + "angle": 0, + "content": "Time." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.672, + 0.225, + 0.684 + ], + "angle": 0, + "content": "Thinking process:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.687, + 0.882, + 0.708 + ], + "angle": 0, + "content": "Okay, so I have this sentence: \"Napoleon died in 1821.\" I need to figure out whether \"1821\" is a time or a place name. Hmm, let me think about this step by step." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.709, + 0.882, + 0.738 + ], + "angle": 0, + "content": "First, I know that \"Napoleon\" is a person, specifically a historical figure. So, if it's the name of a person, then \"1821\" must be related to their life. I'm pretty sure that \"1821\" is a year. I remember that Napoleon was born in 1769 and died in 1821. 
So, 1821 is definitely a year." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.738, + 0.882, + 0.768 + ], + "angle": 0, + "content": "Wait, but what if I'm not sure? Let me consider other possibilities. Sometimes names can be place names or time periods. For example, if someone's name were a city or a country, the number could be a place. But in this case, \"Napoleon\" is a person, so it's more likely a year." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.768, + 0.882, + 0.798 + ], + "angle": 0, + "content": "Also, thinking about the sentence structure, it's written as \"Napoleon died in 1821.\" The structure \"in\" suggests that 1821 is the place. So, in this context, \"in\" is an article that indicates the location, which would be a place name. That makes me think that \"1821\" is indeed a place name, the year." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.799, + 0.882, + 0.818 + ], + "angle": 0, + "content": "I don't think it's a time because time is usually expressed as something like \"in the 19th century\" or \"2023.\" So, 1821 is specifically a year, not a time period. It's more precise because it's a specific year when Napoleon died." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.819, + 0.882, + 0.839 + ], + "angle": 0, + "content": "So, putting it all together, \"Napoleon\" is a person, and \"in 1821\" is indicating the year he died. Therefore, \"1821\" is a place name, specifically a year." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.842, + 0.204, + 0.852 + ], + "angle": 0, + "content": "Final Answer:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.856, + 0.882, + 0.878 + ], + "angle": 0, + "content": "In the sentence \"Napoleon died in 1821,\" the number \"1821\" is a place name, specifically a year. It is used to indicate the year Napoleon died, which is 1821. The use of \"in\" confirms that it refers to a place, not a time period. Therefore, \"1821\" is a year." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.392, + 0.895, + 0.604, + 0.907 + ], + "angle": 0, + "content": "Table 25: Error Case for LRM." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "31" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_origin.pdf b/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..765538043f7f1c583195da629cdb3edfe3239bfe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/75b91eea-3940-4d8f-9204-ff0cff897b91_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a85fa45d0c02768b85e84c911c53a0aa493e3769d39156030aec94fa5cf554a7 +size 1243313 diff --git a/data/2025/2504_10xxx/2504.10368/full.md b/data/2025/2504_10xxx/2504.10368/full.md new file mode 100644 index 0000000000000000000000000000000000000000..01e0df27502f169e60b2d05b8e341bbe348596b4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/full.md @@ -0,0 +1,1073 @@ +# S1-Bench: A Simple Benchmark for Evaluating System 1 Thinking Capability of Large Reasoning Models + +Wenyuan Zhang*, Shuaiyi Nie*, Xinghua Zhang, Zefeng Zhang, Tingwen Liu† + +Institute of Information Engineering, Chinese Academy of Sciences + +School of Cyber Security, University of Chinese Academy of Sciences + +{zhangwenyuan,nieshuaiyi,liutingwen}@iei.ac.cn + +# Abstract + +We introduce S1-Bench, a novel benchmark designed to evaluate the performance of Large Reasoning Models (LRMs) on simple tasks that favor intuitive system 1 thinking rather than deliberative system 2 reasoning. While LRMs have achieved significant breakthroughs in complex reasoning tasks through explicit chains of thought, their heavy reliance on system 2 thinking may limit their system 1 thinking capabilities. 
However, there is a lack of an appropriate benchmark for evaluating LRM's system 1 thinking capabilities. To fill this gap, S1-Bench introduces a suite of simple, diverse, and natural questions across multiple domains and languages, specifically designed to assess LRMs' performance on questions more suitable for system 1. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe a gap between their difficulty perception and generation length. Overall, this work paves the way toward dual-system compatibility in the development of LRMs1. + +# 1 Introduction + +"Simplicity is the ultimate sophistication." + +— Leonardo da Vinci + +Recent advances in Large Reasoning Models (LRMs), notably OpenAI's o1/o3 (OpenAI, 2024) and the DeepSeek-R1 (Guo et al., 2025) series, have propelled the development of Large Language Models (LLMs). Unlike traditional LLMs that exhibit intuitive, heuristic system 1 thinking, LRMs demonstrate deliberate and analytical system 2 reasoning (Qu et al., 2025a; Li et al., 2025b) by explicitly generating external chain-of-thought (COT) (Wei et al., 2022) before producing final answers. Through sophisticated strategies such as + +
BenchmarkCross DomainRealistic ScenariosMulti-lingualAcc.
AIMEXX6.67
GPQAX24.94
Olympiad-Bench27.94
AMCXX31.88
MATHXX58.30
MMLUX66.27
GSM8KXX87.45
ASDIVXX97.51
GSM8K-zeroXXX77.98
RoR-BenchXXX14.24
S1-Bench (ours)100.00
+ +Table 1: Characteristics of S1-Bench, with "Acc." representing the average accuracy of four 7-9B LLMs. See Appendix A.1 for more details. + +self-reflection and multi-path exploration (Li et al., 2025a; Yeo et al., 2025), LRMs can achieve strong performance in tasks that require system 2 thinking, including advanced mathematical and competition-level problems (Yang et al., 2025a). + +However, there remains a lack of appropriate benchmarks for evaluating LRMs' system 1 thinking capabilities. Not all real-world problems require system 2 reasoning. The capacity to dynamically identify simple questions and address them efficiently contributes to both resource optimization and improved user satisfaction for LRMs. Nevertheless, current benchmarks either overemphasize difficulty, are simple yet lack domain diversity, or are only not hard for humans but involve unrealistic adversarial designs. Table 1 presents a collection of recent benchmarks aimed at mitigating the overthinking in LRMs (Sui et al., 2025). The majority of these benchmarks are of high difficulty. For example, AIME and GPQA (MAA Committees; Rein et al., 2024) achieve less than $30\%$ accuracy on conventional small LLMs, which are inherently more suitable for system 2 reasoning. Although some simple mathematical benchmarks are easy enough, such as GSM8K and ASDIV (Cobbe et al., + +2021; Miao et al., 2021), they often suffer from limited domain variety. Furthermore, some tasks that pose little challenge to humans but incorporate adversarial elements tend to lack relevance to realistic scenarios, such as GSM8K-zero (Chiang and Lee, 2024), which includes the correct answer in the questions. Thus, a benchmark to assess the system $l$ thinking capability of LRMs is still lacking, further hindering our understanding of LRMs' cognitive flexibility between the two systems (Ziabari et al., 2025; Qu et al., 2025a). 
+ 

To fill this research gap, we introduce the System 1 Thinking Capability Benchmark (S1-Bench), which measures the performance of LRMs across various simple tasks that are commonly encountered in real-world applications. S1-Bench has the following three characteristics: (1) Simple. The questions are not hard for humans and can be easily answered by LLMs. LLMs with 7-9B parameters can robustly provide correct answers through direct responses when sampled across multiple temperatures. (2) Diverse. S1-Bench is not limited to simple reasoning problems; it encompasses four major categories and 28 subcategories in two languages (English and Chinese), including reasoning problems, commonsense knowledge, instruction following, and analytical problems. (3) Natural. The questions are clear, without any misleading elements or ambiguities, ensuring they can be answered intuitively.

We conduct extensive evaluations on S1-Bench across 28 LRMs, yielding the following key findings: (1) Current LRMs exhibit inefficiency and lack system 1 thinking capabilities across all types of questions, with average output lengths 15.5 times longer than small LLMs on S1-Bench. (2) Despite employing deep reasoning, several LRMs exhibit under-accuracy and limited robustness on simple questions. (3) LRMs exhibit "gut moment" at the beginning of some reasoning processes, showing gut feelings about task difficulty. Yet, even when recognizing a question's simplicity, LRMs often fail to produce shorter responses—revealing a gap between their difficulty awareness and generation behavior. These findings emphasize the significant distance LRMs must traverse to become powerful dual-system compatible models.

Our contributions can be summarized as follows:

- To the best of our knowledge, S1-Bench is the first benchmark to evaluate the system 1 thinking capabilities of LRMs, which paves

the way for dual-system compatibility. 
+ 

- We introduce a workflow for constructing a simple dataset for system 1 evaluation.
- Extensive experiments reveal the inefficiency, under-accuracy, and limited robustness of LRMs on simple questions.
- We find that LRMs exhibit "gut moment" on simple problems, and reveal a gap between their difficulty perception and generation length.

# 2 Related work

# 2.1 Large Reasoning Models

Large Reasoning Models (LRMs), characterized by explicitly generating external thinking processes before final answers (Kumar et al., 2025b; Chen et al., 2025a), achieve a paradigm shift from intuitive system 1 thinking to deliberative system 2 reasoning compared to traditional LLMs (Li et al., 2025b; Qu et al., 2025a), thus achieving superior performance on complex tasks. The development of recent LRMs has largely followed two main approaches: large-scale reinforcement learning (RL) and model distillation. Models trained via large-scale RL (Guo et al., 2025; Team, 2025b; Team et al., 2025b) leverage reward-based optimization to gradually incentivize deliberative reasoning. In contrast, distillation-based LRMs (OpenAI, 2024; Min et al., 2024; Team, 2025a; Ye et al., 2025; Muennighoff et al., 2025; Zhang et al., 2025b) acquire such abilities by transferring structured reasoning patterns from advanced teacher models.

# 2.2 Limitations of LRMs

While LRMs have shown significant performance gains through deliberate reasoning, rigid adherence to this overly cautious thinking can introduce new limitations. On one hand, intermediate reasoning steps can cause excessive token generation and unnecessary solving attempts (Chen et al., 2024b; Hashemi et al., 2025; Kumar et al., 2025a), even leading to redundancy in the hidden layers (Chen et al., 2024c, 2025b). On the other hand, LRMs' performance can drop in specific contexts like safety scenarios (Jiang et al., 2025) and role-playing (Feng et al., 2025). 
However, prior studies mainly evaluated LRMs on complex tasks that are more suited for deliberative system 2 thinking. Our work examines how deliberative reasoning impacts extremely simple problems better matched to intuition-driven system 1 processing.

![](images/d0c4edc2222d9738a5f4311cff4acb0010d5e1a8ac9defd168dd3f480fe62d03.jpg)
Figure 1: Construction workflow for S1-Bench and an illustrative example from each major category.

![](images/d56ce89be3cdd44a24ee6c27fcfae6613fd4bde20dc2301bb2a0b429a48b0392.jpg)

# 3 S1-Bench

We introduce S1-Bench, a bilingual, multi-domain benchmark designed to evaluate system 1 thinking capability of LRMs on extremely simple questions. These questions are easily solvable by traditional LLMs and not hard for humans. S1-Bench, which covers both English and Chinese, is organized into four major categories: reasoning (RSN), knowledge (KNO), instruction following (IF) and analysis (ANA), representing major dimensions commonly employed in LLM capability evaluation (Zheng et al., 2023; Chang et al., 2024).

This section begins with how simplicity is ensured, then the detailed construction workflow for S1-Bench, and concludes with an overview of the dataset statistics. Figure 1 shows the construction workflow and an illustrative example per category.

# 3.1 How to Ensure Simplicity?

We ensure questions are simple and suitable for system 1 thinking through the following two aspects.

# 3.1.1 A Priori Simplicity Constraints

We begin by generating question-answer pairs through collaboration between humans and LLMs. Each pair is required to satisfy both the general and the category-specific simplicity criteria.

The general simplicity criteria require that: (1) Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps. (2) Answers must be unique or easily falsifiable (e.g., providing a three-letter English word).

The category-specific simplicity criteria are as follows. 
RSN: Limited to problems solvable with minimal reasoning or intuition. KNO: Restricted to common knowledge with unique, verifiable answers from sources like Wikipedia. IF: Involve straightforward instructions without strict formatting requirements. ANA: Limited to questions

whose answers can be directly inferred from the prompt, such as binary classification. These constraints ensure all questions remain straightforward for human respondents.

# 3.1.2 A Posteriori Simplicity Verification

Due to the biases existing between language models and humans (Gallegos et al., 2024), questions that are simple for humans may be difficult for LLMs. Therefore, we introduce an additional a posteriori verification to ensure that questions are simple enough to be correctly and robustly answered by smaller LLMs from different families.

# 3.2 Construction Workflow

Subcategory Preparation. To ensure diversity, we refer to the subcategories included in existing benchmarks (e.g., MMLU, IFEval, and GSM8K) and evaluation surveys (Chang et al., 2024) to select, merge, or design subcategories for S1-bench, ensuring that each meets the simplicity requirements. The definition and example question for each subcategory can be found in Appendix A.2.

Implementation of A Priori Simplicity. First, we use two data generators² to create 100 initial bilingual question-answer pairs for each candidate subcategory. The data generation prompt (see Appendix A.3) explicitly incorporates the subcategory definitions, along with both the general and category-specific simplicity criteria, while also aiming to ensure diversity in the generated questions. Second, these question-answer pairs are then independently evaluated by three annotators and two quality discriminators³ according to the general and category-specific simplicity criteria (see Appendix A.3 for prompt of discriminators), resulting in five evaluation outcomes per pair. 
The three an + +![](images/56b5c8368bf0c329ab942c0cfe68ef542389105adef4c16bb5dc358294695d92.jpg) +Figure 2: Statistical distribution of token counts for S1-Bench questions. + +notators are experienced graduate students familiar with LLMs and well-acquainted with the goals of S1-Bench. Finally, based on these evaluation outcomes, three annotators discuss and collectively decide whether to retain, modify, or discard each question. + +Implementation of A Posteriori Simplicity. First, each question obtained from the previous stage is input into the small LLM $\text{validators}^4$ with 7~9 B parameters. For each question, we sample 10 answers at three different temperature settings (0, 0.2, and 0.4), resulting in a total of 30 responses per question. These responses are then individually evaluated for correctness using GPT-4o. Second, if all 30 sampled responses are correct, the question is accepted into S1-Bench. Otherwise, the question is returned to the generators, where a difficulty-reduction prompt (see Appendix 10) is applied to simplify it. The simplified questions then undergoes the same subsequent process. Finally, questions fail to meet the full-accuracy criterion (i.e., 30 out of 30 correct) after three rounds of difficulty reduction are excluded from the workflow. + +The final S1-Bench comprises questions that satisfy both human-based a priori simplicity constraints and LLM-based a posteriori simplicity verification. + +# 3.3 Benchmark Statistics + +S1-Bench comprises 422 question-answer pairs across four major categories and 28 subcategories, balanced with 220 English and 202 Chinese questions. Figure 2 shows the token length distribution, with questions averaging 14.46 tokens. To ensure that the a posteriori verification process does not introduce simplicity only tailored to the small LLM validator, we evaluate S1-Bench on five additional LLMs and on Qwen3 Family with reasoning modes disabled. As shown, even the 1.7B model achieves + +
Modelt=0.0t=0.2t=0.4Tokens
Gemma2-9B100.00100.00100.0038.77
Llama3.1-8B100.00100.00100.0042.00
Mistral-8B100.00100.00100.0044.38
Qwen2.5-7B100.00100.00100.0042.81
DeepSeek-v3100.00100.00100.0079.53
Llama3.3-70B100.0099.7699.7653.71
Qwen2.5-14B99.7499.7699.7640.00
Qwen2.5-32B99.9899.9899.9843.17
Qwen2.5-72B100.00100.00100.0044.61
Qwen3-32B (w/o think)100.00100.00100.00103.30
Qwen3-14B (w/o think)100.00100.00100.0086.35
Qwen3-8B (w/o think)100.00100.0099.7690.54
Qwen3-1.7B (w/o think)98.1097.1695.73114.32
+ +Table 2: Average accuracy (acc@k) and response token count of different LLMs, each sampled 10 times at three temperature settings. + +over $98\%$ accuracy at temperature 0. + +# 4 Main Experiment + +# 4.1 Baseline Models and Configurations + +We evaluated 28 different LRMs, which are explicitly trained to first respond with a thinking process, and then generate a final answer. These LRMs include open-source families, such as DeepSeek (Guo et al., 2025), Qwen (Yang et al., 2025a), Nemotron (Bercovich et al., 2025), LightR1 (Wen et al., 2025), s1.1 (Muennighoff et al., 2025), EXAONE (Research et al., 2025), and SkyT1 (Griggs et al., 2025), as well as closed-source Hunyuan-T1 (Tencent, 2025), spanning from tiny (1.5B) to large (671B) parameter sizes5. Notably, OpenAI's o-series models are not included as they do not disclose thinking processes to users. For each model, we consider two sets of generation configurations: Greedy sampling with temperature $t = 0$ ; Top-p sampling with temperature $t = 0.6$ , topp=0.95 and sampling size $k = 5$ . Only top-p sampling results are reported in the main text; greedy decoding results are provided in the Appendix C.1. + +# 4.2 Evaluation Metrics + +Format Metrics. To assess the formatting quality of LRM responses, we compute the proportion of responses that satisfy the following two formatting criteria (averaged over 5 runs for top-p sampling). S-Corr (Strict Format Correctness Rate): In general, an end thinking marker (e.g., $\langle$ /think $\rangle$ ) is expected to separate the thinking process from the non-empty final answer. S-Corr measures the proportion of responses that satisfy this criterion. + +
Model IDSizeLoose FormatStrict FormatL-Corr ↑S-Corr ↑Tokens ↓
pass@1↑acc@k↑pass@1↑acc@k↑
Validator LLMs7-9B100.00100.00100.00100.00--42.00
Qwen3-A22B235B99.9199.7699.9199.76100.00100.00701.65
Qwen3-A3B30B99.9599.7699.9599.76100.00100.00638.40
QwQ-32B32B100.00100.00100.00100.00100.00100.00720.10
Qwen3-32B32B99.9199.5399.9199.5399.9199.91668.69
Qwen3-14B14B99.9599.7699.9599.7699.9599.95582.99
Qwen3-8B8B99.9599.7699.9599.7699.9599.95657.76
Qwen3-1.7B1.7B99.3497.3999.3497.3999.8199.81595.90
Hunyuan-T1-99.9199.5399.9199.53100.00100.00542.31
DS-R1671B100.00100.00100.00100.00100.00100.00646.40
DS-R1-70B70B99.4897.3999.3896.92100.0099.91453.81
DS-R1-32B32B99.7298.8299.7298.82100.00100.00429.91
DS-R1-14B14B99.5797.8799.5797.87100.00100.00475.46
DS-R1-8B8B97.4497.1697.3997.1699.7699.53452.11
DS-R1-7B7B95.2185.7895.2185.7899.2499.24454.55
DS-R1-1.5B1.5B81.4754.5081.4754.5097.5897.58489.54
Sky-T1-32B32B98.8294.7994.8879.6299.4895.26163.00
Nemotron-49B49B99.1597.3999.1597.39100.00100.00362.54
Nemotron-8B8B86.1669.9179.8159.0099.4384.31372.57
L-R1-32B32B97.8791.0094.7479.6298.9195.071095.36
L-R1-32B-DS32B99.5798.1099.5798.1099.8199.81524.12
L-R1-14B-DS14B99.0595.9799.0595.9799.1999.19693.19
L-R1-7B-DS7B94.6483.6594.6483.6599.7699.67496.47
s1.1-32B32B99.5398.3499.4898.1099.5799.53998.00
s1.1-14B14B97.6393.6097.2591.9497.7797.39839.86
s1.1-7B7B96.6888.3988.5863.9897.1188.96711.49
EXAONE-32B32B97.0694.0897.0694.0899.8199.81800.56
EXAONE-7.8B7.8B88.1575.1287.8274.4198.7298.061046.87
EXAONE-2.4B2.4B72.4256.1672.3256.1697.4497.251593.96
+ +Table 3: Main results in the top-p sampling setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst. + +L-Corr (Loose Format Correctness Rate): LRMs may occasionally generate responses with endless thinking. L-Corr quantifies the proportion of responses that do not exhibit this failure mode. Detailed format types are given in Appendix B.4. + +Efficiency Metrics. We calculate the average token counts for responses (Tokens) except for those generate endless thinking. Token counts are obtained using the Qwen2.5 tokenizer. + +Accuracy Metrics. We calculate accuracy metrics under both strict and loose formatting requirements, respectively. We use GPT-4o as the evaluator to assess the correctness of the responses6, with the evaluation prompt in Appendix B.2. For greedy sampling, we directly calculate the accuracy rate. For top-p sampling, we utilize two metrics: Pass@1 and Acc@k. Pass@1 follows DeepSeekR1 (Guo et al., 2025), and Acc@k is the percentage of questions with all k answers correct. The two metrics use k=5, and their detailed definitions can be found in Appendix B.3. Notably, S-Corr \ L-Corr represents the upper bound for pass@1 and acc@5 under strict \ loose formatting requirements. + +# 4.3 Main Results + +Table 3 and Figure 8 presents the main results of LRMs on S1-Bench, revealing two key phenomena. + +LRMs exhibit significantly lower efficiency than LLMs on S1-Bench, and no clear correlation is observed between ART and model size. We observed the following: First, state-of-the-art LRMs, such as DeepSeek-R1 and Qwen3, do not demonstrate a distinct advantage in efficiency. In contrast, Sky-T1-32B, which undergoes specific optimizations to mitigate overthinking using SimPO, achieves the highest efficiency. Second, The L-R1-DS 7B/14B/32B models are further post-trained from the DS-R1-7B/14B/32B models. 
The L-R1-DS models tend to produce longer responses, suggesting that while additional post-training may enhance the model's capability for complex reasoning, it comes at the cost of response efficiency. Finally, the s1.1 models generate considerably longer responses than the DeepSeek-R1-Distilled models. Despite both models being trained solely with SFT to acquire long-COT reasoning ability, the DeepSeek-R1-Distilled models use 800K training samples, while the s1.1 models are trained on only 1K. This discrepancy suggests that the smaller training set may lead to superficial imitation of long reasoning patterns, resulting in verbose thinking on + +
Model IDSizeS1-Bench-ENS1-Bench-ZHAvg
RSNKNOIFANAAvgRSNKNOIFANAAvg
Gemma2-9B9B74.829.45.352.445.951.619.87.535.131.038.8
Llama3.1-8B8B91.035.412.461.956.044.028.315.218.726.742.0
Qwen2.5-7B7B65.546.36.449.646.550.546.69.836.938.842.8
Mistral-8B8B67.255.58.650.149.647.356.114.829.738.744.4
Column Avg-74.641.68.253.549.548.337.711.830.133.842.0
Sky-T1-32B32B215.8174.198.5233.3194.3125.5125.399.4145.5128.9163.0
Nemotron-49B49B599.7587.6396.5526.1540.4232.9157.3235.5107.8168.8362.5
Nemotron-8B8B561.0585.1458.0303.1462.6369.5326.0288.1166.7273.5372.6
DS-R1-32B32B421.8504.4414.7521.1473.7362.2385.6343.1408.8382.2429.9
DS-R1-8B8B472.2528.9530.7462.7491.2521.9404.4266.2395.5409.4452.1
DS-R1-70B70B464.1501.3378.5536.1484.0450.8450.2328.4416.7420.9453.8
DS-R1-7B7B447.5623.9353.8510.0495.5446.5463.2339.5373.0409.4454.5
DS-R1-14B14B503.7674.7367.3494.2519.0452.0465.4375.3405.8428.0475.5
DS-R1-1.5B1.5B480.8584.7417.4577.2529.1493.0497.4329.8423.1446.0489.5
L-R1-7B-DS7B568.1667.1501.7566.3580.3444.8454.6344.1366.4405.0496.5
L-R1-32B-DS32B574.5706.6647.6632.8636.3431.2367.0377.1418.7402.2524.1
Hunyuan-T1-561.6693.8380.9435.0521.2676.8553.8505.1523.8565.3542.3
Qwen3-14B14B700.4639.5286.2575.0579.8730.4557.2403.1586.0586.5583.0
Qwen3-1.7B1.7B790.4720.6399.9526.2624.6689.8563.6406.4545.9564.7595.9
Qwen3-A3B30B745.0729.3328.1594.8625.7773.7655.8453.7648.6652.2638.4
DS-R1671B786.1723.8711.4529.2672.5727.3638.5607.9533.9617.9646.4
Qwen3-8B8B853.7753.1394.4629.5683.2749.2623.8459.3624.0630.0657.8
Qwen3-32B32B805.7774.2356.9645.5674.7780.2695.2446.6645.3662.1668.7
L-R1-14B-DS14B951.01026.0829.8653.5848.2594.7610.1442.2451.7525.7693.2
Qwen3-A22B235B925.3864.3487.2605.7734.5803.3713.4487.2611.3665.9701.7
s1.1-7B7B1039.5840.81923.2529.4929.9489.6351.31034.3332.4475.6711.5
QwQ-32B32B873.3808.1520.8634.7722.4866.9707.3613.3667.7717.6720.1
EXAONE-32B32B1323.71057.61537.0711.61086.4703.2348.61302.9125.5490.3800.6
s1.1-14B14B871.8746.22233.1708.1960.2654.6546.01512.6579.7710.7839.9
s1.1-32B32B1077.9889.72055.4781.71081.7995.6765.21634.6666.5906.5998.0
EXAONE-7.8B7.8B1498.31398.91775.7882.41303.81410.3497.81633.1205.0767.01046.9
L-R1-32B32B1614.01217.31996.9930.11338.31035.6737.71240.7610.2835.31095.4
EXAONE-2.4B2.4B1927.31426.21200.1825.71320.72469.71622.62471.61511.21898.71594.0
Column Avg-809.1766.0785.1591.5718.4695.8545.9677.9482.0576.7650.3
Improvement-×10.8×18.4×96.0×11.1×14.5×14.4×14.5×57.3×16.0×17.1×15.5
+ +Table 4: Average response tokens in the top-p sampling setting on the S1-bench across two languages and four main categories. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst. Bold represents the maximum Improvement value for each language. + +simple questions. + +Several LRMs exhibit under-accuracy and limited robustness on simple questions. First, our observations find that, despite employing deep reasoning, most LRMs tend to exhibit lower accuracy on simple questions compared to traditional LLMs. For example, DS-R1-1.5B and EXAONE-2.4B achieve just above $50\%$ acc@k. Second, many LRMs struggle with robust correctness in top-p sampling, where acc@k is significantly lower than pass@1. This issue is particularly pronounced in smaller LRMs. For instance, DS-R1-1.5B achieved $81.47\%$ pass@1 but only $54.50\%$ acc@k. + +# 5 Efficiency Analysis + +# 5.1 Analysis across Question Types + +To better understand the efficiency differences of LRRMs across question types, we analyze the average response tokens across 4 main categories, 28 + +subcategories, and two languages. The results are displayed in Table 4 and Appendix C.2. + +LRMs exhibit a substantial increase in response length across all four major categories, 28 subcategories, and two languages. As shown in Table 4, for each of the four major categories, the average response length of LRMs exceeds that of LLMs by more than a factor of ten. Response lengths also increase significantly across all subcategories (see Appendix C.2). This suggests that while LRMs are primarily trained on reasoning data to produce long CoT style responses, this stylistic pattern generalizes well across a wide range of question types. + +Moreover, 23 out of the 28 LRMs produce longer responses to questions in English than Chinese. + +LRMs exhibit the most significant increase in ART for instruction following questions and tend to over-exlore when the solution space is vast. 
As shown in Table 4, although small LLMs + +![](images/7a3795cc2e55a9b576ec292b5aa56d5cdc6ea397ea09993746b001df9df3ab87.jpg) +Figure 3: (a) Comparison of first round and additional token costs for each LRM. (b) Distribution of solution rounds for each LRM. + +provide the most concise responses to instruction following questions, LRMs generate dramatically longer outputs—96.0 times longer in English and 57.3 times longer in Chinese than small LLMs. To investigate the cause, we further analyze the subcategories of instruction following questions. As shown in Appendix C.2, average tokens is notably longer in the subcategories of length constraints, character constraints, and sentence constraints. These three question types share a similar characteristic: their correctness is easy to verify, but the solution space is vast. We find that, although the model quickly identifies a correct answer, it becomes trapped in the search space, continually exploring alternatives and failing to stop in time. A case can be seen in Table 21. This phenomenon is more pronounced in families with lower efficiency, such as s1.1 and EXAONE. + +# 5.2 Thinking Solution Analysis + +To better understand the causes of inefficiency in LRMs on S1-Bench, we analyze the solution rounds of their thinking processes7. We first use DeepSeek-v3 to segment each thinking process into several solutions, each defined as a point at which LRMs explicitly arrives at a conclusion that matches the correct answer. We then compute the average token counts in the first solution. The detailed experimental setup is provided in Appendix C.3. Our analysis reveals the following: + +![](images/165f30c01295e8eb67f8483f5b0550795a5819862b0e4f5ca8b4d0a0d987a2ac.jpg) +Figure 4: Distribution of the thinking process across four categories. FA and TP refer to Final Answer and Thinking Process, respectively. Green bars indicate cases where the final answer is correct, while red bars indicate cases where it is incorrect. 
The token count consumed in the first solution of LRMs significantly exceeds that of validator LLMs, as shown in Figure 3 (a). This suggests that LRMs may involve unnecessary reasoning steps in each solution, which could be one of the reasons for their inefficiency.

The primary reason for efficiency gaps between LRMs lies in the number of redundant solution rounds they generate, rather than the token cost in the initial round. As shown in Figure 3 (a), although total thinking token counts vary widely across LRMs, their token counts in the initial round are similar and only account for a small fraction of the total. Figure 3 (b) further shows the distribution of solution rounds on S1-Bench, revealing that LRMs with longer thinking processes tend to generate more solution rounds, and this redundancy greatly increases computational cost. Moreover, further experiments reveal that the redundancy in the reasoning process gradually increases over time. Appendix C.4 presents the experimental details.

# 6 Error Analysis

This section analyzes the errors made in the thinking process. Specifically, we utilize DeepSeek-v3 to categorize the responses of LRMs into four cases and compute the corresponding proportions: (1) Final answer correct; thinking process entirely accurate. (2) Final answer correct; thinking process contains intermediate errors. (3) Final answer incorrect; correct answer mentioned in thinking process. (4) Final answer incorrect; correct answer never mentioned in thinking process. The classification details are in Appendix C.5; results are shown in Figure 4. Key findings include:

![](images/36f5f6a81bdd9b531a8ac7893e1e2c26b7fb338fe001e3626140dc3806b85f1b.jpg)
Figure 5: Top: Count of "gut moments" across models. Bottom: Probability of "gut moments" by question type.

Lower-accuracy LRMs tend to produce less reliable reasoning chains; even when they arrive at the correct final answer, their intermediate steps often contain errors (light green). 
LRMs with high accuracy (e.g., DS-R1) show almost no flawed reasoning steps, whereas those with lower accuracy (e.g., DS-R1-1.5B) often generate incorrect intermediate conclusions, further indicating that they lack robust reasoning ability.

Although LRMs sometimes mention the correct answer during reasoning, they may deviate and ultimately produce incorrect final answers (light red). In one case, the LRM initially arrived at the correct answer but undermined it through excessive verification; a case can be seen in Table 24. In another case, the LRM directly denies the correct answer; a case can be seen in Table 23.

# 7 Gut Moment

We observe an intriguing phenomenon on S1-Bench: LRMs sometimes show an early sense of question difficulty before solving, which we call the "gut moment." To explore this phenomenon, we prompt GPT-4o to classify the initial part of model responses (before the first "\n\n") into four types based on its comment on difficulty: easy, neutral, difficult, and no comment. Figure 5 presents these classifications and their probabilities across four question types. Experimental details and cases are in Appendix C.6. This leads to the following observations:

First, all LRMs show the "gut moment" phenomenon to varying degrees, which is more evident

![](images/a5c4a6d06d561fec23d5e14de192381c1db489b74d1bad7bd2dc349cd4dde9ae.jpg)
Figure 6: Average response tokens in the easy category vs. all samples. Dots show difference: easy minus all.

in the Qwen, DeepSeek, and Light-R1 families and Hunyuan-T1. Second, LRMs show stylistic differences in expressing "gut moment." For example, the Qwen family often views questions as simple, whereas the DeepSeek-distilled models show more diverse difficulty comments. Third, some LRMs show significantly stronger "gut moment" in Chinese than in English, such as the Qwen and DeepSeek families, likely due to a higher proportion of Chinese in their training data. 
Finally, the "gut moment" is most evident in reasoning questions and rarely appears in analytical questions, except in DeepSeek-distilled models. + +To investigate whether the early sense of a question as "easy" leads to a corresponding reduction in response length, we compare the average response tokens for questions in the easy category versus all samples. The results are shown in Figure 6. Except for L-R1-32B, other LRMs do not exhibit a noticeable decrease in response length when questions are viewed as "easy"; in fact, 21 out of 28 LRMs showed an increase in response length under this condition. This suggests a discrepancy between the LRM's initial sense of difficulty and its generative behavior, the causes and improvements of which warrant further investigation. + +# 8 Conclusion + +This paper introduces S1-Bench, the first benchmark designed to evaluate system 1 thinking capabilities in LRMs. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe "gut moment" and find a gap between their difficulty perception and generation length. Overall, this work paves the way toward dual-system compatibility in the development of LRMs. + +# Limitations + +Although S1-Bench pioneers the evaluation of system 1 thinking in LRMs, it still has several limitations. First, due to our emphasis on ensuring the uniqueness of each sample during dataset construction—for instance, including only one question for basic arithmetic operations such as addition, subtraction, and multiplication—the overall scale of the benchmark remains limited. As a next step, we plan to expand the scale of S1-Bench. Second, while recent months have seen a surge in newly released open-source LRMs, we have only evaluated 28 representative models and have not covered the full spectrum of available models. 
Lastly, we do not propose methods to improve the efficiency of LRMs on system 1 tasks in this work; this will be the focus of our future research. + +# References + +Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697. +AI-MO. 2024. Amc 2023. +Daman Arora and Andrea Zanette. 2025. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463. +Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. 2025. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179. +Akhiad Bercovich, Itay Levy, Izik Golan, Mohammad Dabbah, Ran El-Yaniv, Omri Puny, Ido Galil, Zach Moshe, Tomer Ronen, Najeeb Nabwani, Ido Shahaf, Oren Tropp, Ehud Karpas, Ran Zilberstein, Jiaqi Zeng, Soumye Singhal, Alexander Bukharin, Yian Zhang, Tugrul Konuk, and 113 others. 2025. Llamameton: Efficient reasoning models. Preprint, arXiv:2505.00949. +Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, and 1 others. 2024. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45. +Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 2025a. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567. + +Xiaoshu Chen, Sihang Zhou, Ke Liang, and Xinwang Liu. 2024a. Distilling reasoning ability from large language models with adaptive thinking. arXiv preprint arXiv:2404.09170. +Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Quzhi Liu, Mengfei Zhou, Zhuosheng Zhang, and 1 others. 2024b. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187. 
+Yilong Chen, Junyuan Shang, Zhengyu Zhang, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2024c. Mixture of hidden-dimensions transformer. arXiv preprint arXiv:2412.05644. +Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2025b. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842. +Cheng-Han Chiang and Hung-yi Lee. 2024. Overreasoning and redundant calculation of large language models. In Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers), pages 161-169, St. Julian's, Malta. Association for Computational Linguistics. +Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, and 1 others. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168. +Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, and 1 others. 2025. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260. +Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, and 1 others. 2025. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235. +Xiachong Feng, Longxu Dou, and Lingpeng Kong. 2025. Reasoning does not necessarily improve roleplaying ability. arXiv preprint arXiv:2502.16940. +Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. 2024. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993. +Yichao Fu, Junda Chen, Yonghao Zhuang, Zheyu Fu, Ion Stoica, and Hao Zhang. 2025. 
Reasoning without self-doubt: More efficient chain-of-thought through certainty probing. In ICLR 2025 Workshop on Foundation Models in the Wild. + +Isabel O Gallegos, Ryan A Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K Ahmed. 2024. Bias and fairness in large language models: A survey. Computational Linguistics, 50(3):1097-1179. +Tyler Griggs, Shiyi Cao, Dacheng Li, Shu Liu, Shishir G. Patil, Matei Zaharia, Joey Gonzalez, and Ion Stoica. 2025. Think less, achieve more: Cut reasoning costs by $50\%$ without sacrificing accuracy. +Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948. +Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. 2024. Token-budget-aware llm reasoning. arXiv preprint arXiv:2412.18547. +Masoud Hashemi, Oluwanifemi Bamgp Bose, Sathwik Tejaswi Madhusudhan, Jishnu Sethumadhavan Nair, Aman Tiwari, and Vikas Yadav. 2025. Dna bench: When silence is smarter-benchmarking over-reasoning in reasoning llms. arXiv preprint arXiv:2503.15793. +Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, and 1 others. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008. +Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2021a. Measuring massive multitask language understanding. In International Conference on Learning Representations. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874. 
+Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. 2025. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296. +Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2025. Livecodebench: Holistic and contamination free evaluation of large language models for code. In The Thirteenth International Conference on Learning Representations. + +Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. 2025. Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025. +Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. 2025a. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, pages arXiv-2502. +Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025b. Llm post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321. +Ayeong Lee, Ethan Che, and Tianyi Peng. 2025. How well do llms compress their own chain-of-thought? a token complexity approach. arXiv preprint arXiv:2503.01141. +Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Eric Tang, Sumanth Hegde, Kourosh Hakhamaneshi, Shishir G Patil, Matei Zaharia, and 1 others. 2025a. Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374. +Yiwei Li, Peiwen Yuan, Shaoxiong Feng, Boyuan Pan, Xinglin Wang, Bin Sun, Heda Wang, and Kan Li. 2024. Escape sky-high cost: Early-stopping self-consistency for multi-step reasoning. arXiv preprint arXiv:2401.10480. 
+Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Ji-axin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, and 1 others. 2025b. From system 1 to system 2: A survey of reasoning large language models. arXiv preprint arXiv:2502.17419. +Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. 2025. Reward-guided speculative decoding for efficient ltm reasoning. arXiv preprint arXiv:2501.19324. +Ximing Lu, Seungju Han, David Acuna, Hyunwoo Kim, Jaehun Jung, Shrimai Prabhumoye, Niklas Muennighoff, Mostofa Patwary, Mohammad Shoeybi, Bryan Catanzaro, and 1 others. 2025. Retro-search: Exploring untaken paths for deeper and efficient reasoning. arXiv preprint arXiv:2504.04383. +Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. 2025. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570. +Wenjie Ma, Jingxuan He, Charlie Snell, Tyler Griggs, Sewon Min, and Matei Zaharia. 2025a. Reasoning models can be effective without thinking. arXiv preprint arXiv:2504.09858. + +Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. 2025b. Cot-valve: Length-compressible chain-of-thought tuning. arXiv preprint arXiv:2502.09601. +MAA Committees. Aime problems and solutions. https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions. +Shen-Yun Miao, Chao-Chun Liang, and Keh-Yih Su. 2021. A diverse corpus for evaluating and developing english math word problem solvers. arXiv preprint arXiv:2106.15772. +Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, and 1 others. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413. 
+Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393. +Tergel Munkhbat, Namgyu Ho, Seo Hyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. 2025. Self-training elicits concise reasoning in large language models. URL https://arxiv.org/abs/2502.20122. +OpenAI. 2024. Learning to reason with LLMs. https://openai.com/index/learning-to-reason-with-11ms/. +Rui Pan, Yinwei Dai, Zhihao Zhang, Gabriele Oliaro, Zhihao Jia, and Ravi Netravali. 2025. Specreason: Fast and accurate inference-time compute via speculative reasoning. arXiv preprint arXiv:2504.07891. +Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. 2021. Are nlp models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2080-2094. +Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, and 1 others. 2025a. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614. +Yuxiao Qu, Matthew YR Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. 2025b. Optimizing test-time compute via meta reinforcement finetuning. arXiv preprint arXiv:2503.07572. +David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2024. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling. + +LG Research, Kyunghoon Bae, Eunbi Choi, Kibong Choi, Stanley Jungkyu Choi, Yemuk Choi, Seokhee Hong, Junwon Hwang, Hyojin Jeon, Kijeong Jeon, and 1 others. 2025. Exaone deep: Reasoning enhanced language models. 
arXiv preprint arXiv:2503.12524. +Jianshu She, Zhuohao Li, Zhemin Huang, Qi Li, Peiran Xu, Haonan Li, and Qirong Ho. 2025. Hawkeye: Efficient reasoning with model collaboration. arXiv preprint arXiv:2504.00424. +Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. 2025a. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472. +Zhenyi Shen, Hanqi Yan, Linhai Zhang, Zhanghao Hu, Yali Du, and Yulan He. 2025b. Codi: Compressing chain-of-thought into continuous space via self-distillation. arXiv preprint arXiv:2502.21074. +Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and 1 others. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419. +Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. 2019. Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4149-4158. +Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. 2025. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233. +Kimi Team, A Du, B Gao, B Xing, C Jiang, C Chen, C Li, C Xiao, C Du, C Liao, and 1 others. 2025a. Kimi k1. 5: Scaling reinforcement learning with llms. URL https://arxiv.org/abs/2501.12599. +Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025b. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599. +NovaSky Team. 2025a. Sky-t1: Train your own o1 preview model within $450. https://novasky-ai.github.io/posts/sky-t1. +Qwen Team. 2025b. 
QwQ-32b: Embracing the power of reinforcement learning. https://qwenlm.github.io/blog/qwq-32b/. +Tencent. 2025. Reasoning efficiency redefined! Meet Tencent's 'hunyuan-t1'—the first mamba-powered ultra-large model. https://llm.hunyuan.tencent.com/#/Blog/hy-t1/. + +Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. 2024. Dynamic self-consistency: Leveraging reasoning paths for efficient llm sampling. arXiv preprint arXiv:2408.17017. +Junlin Wang, Shang Zhu, Jon Saad-Falcon, Ben Athiwaratkun, Qingyang Wu, Jue Wang, Shuaiwen Leon Song, Ce Zhang, Bhuwan Dhingra, and James Zou. 2025a. Think deep, think fast: Investigating efficiency of verifier-free inference-time-scaling methods. arXiv preprint arXiv:2504.14047. +Xinglin Wang, Shaoxiong Feng, Yiwei Li, Peiwen Yuan, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, and Kan Li. 2024. Make every penny count: Difficulty-adaptive self-consistency for cost-efficient reasoning. arXiv preprint arXiv:2408.13457. +Yiming Wang, Pei Zhang, Siyuan Huang, Baosong Yang, Zhuosheng Zhang, Fei Huang, and Rui Wang. 2025b. Sampling-efficient test-time scaling: Self-estimating the best-of-n sampling in early decoding. arXiv preprint arXiv:2503.01422. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837. +Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. Light-R1: Curriculum sft, dpo and rl for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460. +Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. 2025. Tokenskip: Controllable chain-of-thought compression in llms. arXiv preprint arXiv:2502.12067. +Jingxian Xu, Mengyu Zhou, Weichang Liu, Hanbing Liu, Shi Han, and Dongmei Zhang. 2025a. 
Twt: Thinking without tokens by habitual reasoning distillation with multi-teachers' guidance. arXiv preprint arXiv:2503.24198. +Yuhui Xu, Hanze Dong, Lei Wang, Doyen Sahoo, Junnan Li, and Caiming Xiong. 2025b. Scalable chain of thoughts via elastic reasoning. arXiv preprint arXiv:2505.05315. +Yuchen Yan, Yongliang Shen, Yang Liu, Jin Jiang, Mengdi Zhang, Jian Shao, and Yueting Zhuang. 2025. Infty think: Breaking the length limits of long-context reasoning in large language models. arXiv preprint arXiv:2503.06692. +An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, and 1 others. 2025a. Qwen3 technical report. arXiv preprint arXiv:2505.09388. + +Chenxu Yang, Qingyi Si, Yongjie Duan, Zheliang Zhu, Chenyu Zhu, Zheng Lin, Li Cao, and Weiping Wang. 2025b. Dynamic early exit in reasoning models. arXiv preprint arXiv:2504.15895. +Junjie Yang, Ke Lin, and Xing Yu. 2025c. Think when you need: Self-adaptive chain-of-thought learning. arXiv preprint arXiv:2504.03234. +Wang Yang, Xiang Yue, Vipin Chaudhary, and Xiaotian Han. 2025d. Speculative thinking: Enhancing small-model reasoning with large model guidance at inference time. arXiv preprint arXiv:2504.12329. +Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. 2025e. Towards thinking-optimal scaling of test-time compute for llm reasoning. arXiv preprint arXiv:2502.18080. +Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. LIMO: Less is more for reasoning. arXiv preprint arXiv:2502.03387. +Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373. +Bin Yu, Hang Yuan, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. 2025a. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. arXiv preprint arXiv:2505.03469. 
+Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. 2025b. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810. +Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. 2025a. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589. +Wenyuan Zhang, Tianyun Liu, Mengxiao Song, Xiaodong Li, and Tingwen Liu. 2025b. SOTOPIAΩ: Dynamic strategy injection learning and social instruction following evaluation for social agents. Preprint, arXiv:2502.15538. +Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. Judging LLM-as-a-judge with MT-bench and chatbot arena. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track. +Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. 2025. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470. + +# A More Information of S1-Bench Construction + +# A.1 Benchmark Statistics + +We survey studies on improving the efficiency of LRMs, as there is potential overlap between these studies and the technical approaches aimed at enhancing system 1 thinking in LRMs. Table 7 presents the results of our survey. We compile the benchmarks used in these studies for evaluation, that are typically used to verify whether models achieve efficiency improvements. Benchmarks that appear more than four times include: MATH500 (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), AIME24/25 (MAA Committees), GPQA (Rein et al., 2024), AMC23 (AI-MO, 2024), MMLU (Hendrycks et al., 2021a), Olympiad-Bench (He et al., 2024), SVAMP (Patel et al., 2021), LiveCodeBench (Jain et al., 2025), and CommonSenseQA (Talmor et al., 2019). 
+ +The accuracy shown in Table 1 is the average result of the four models, Qwen2.5-7B, Llama3.1-8B, Mistral-8B, and Gemma2-9B, at temperature 0, using GPT-4o as the evaluator. + +# A.2 Subcategories in S1-Bench + +Figure 7 shows the pie chart distribution of 28 subcategories in S1-Bench. For more details on the subcategories, please refer to Table 8,9. + +# A.3 Prompt for S1-Bench construction + +This section presents the prompts used in the construction of S1-Bench, including the Initial Generation prompt, the Discriminating Generation Quality prompt, and the Reduce Difficulty prompt. See Table 10 for details. + +# B Baseline Models and Evaluation Details + +# B.1 Baseline Model Details + +Table 11 presents the abbreviations, IDs, and URLs of LLMs used in this paper. Table 12 displays the abbreviations, IDs, URLs, organizations, training algorithms, and training data volumes of open-source LRMs evaluated in this study. + +# B.2 GPT-4o and Human Evaluation + +We use GPT-4o as the evaluator to assess the correctness of the responses. If a final answer can be isolated, only the final answer is evaluated; otherwise, the entire response is assessed. The evaluation prompt is provided in Table 13. + +![](images/d9840947d8bbf7b1525dab3d32fa42e1b624558bb99254381f1ead27270614bf.jpg) +Figure 7: S1-Bench Category Display. The inner circle represents four major categories, and the outer circle includes 28 subcategories. + +To evaluate the consistency between the GPT-4 judge's assessments and human judgments, we conduct a comprehensive human evaluation study involving three of the authors. Specifically, we randomly sample 20 question-answer pairs from each model's greedy decoding results, resulting in a dataset of 640 pairs derived from 32 models (including 4 verifier LLMs and 28 LRMs). The questions, reference answers, and model responses are then presented to three annotators, who independently judge the correctness of each model response. 
The final human evaluation results are determined through majority voting. Ultimately, the Cohen's Kappa between the human evaluators and the GPT-4 judge is calculated to be 0.83, indicating an exceptionally high level of agreement. + +# B.3 Accuracy Metrics Details + +Pass@1: Followed DeepSeek-R1 (Guo et al., 2025), we calculate pass@1 to assess the percentage of correct responses among the $k = 5$ generations. Specifically, it is defined as: + +$$ +\text {p a s s} @ 1 = \frac {1}{k} \sum_ {i = 1} ^ {k} p _ {i}, \tag {1} +$$ + +where $p_i$ is the correctness of the i-th generation. Acc@k: Since S1-Bench is composed of extremely simple questions, we calculate acc@k. Specifically, acc@k=1 if all k responses are correct and acc@k = 0 otherwise. It is defined as: + +$$ +\operatorname {a c c} @ \mathrm {k} = \prod_ {i = 1} ^ {k} p _ {i}, \tag {2} +$$ + +![](images/63656eb5bbdbb3adfe97f4b0f590a44d26894e6a8e5d44117cfa384530768d08.jpg) +Figure 8: LRMs exhibit under-accuracy and overthinking on simple problems. Shapes represent organizations, colors represent base model families, with darker colors indicating larger models, and connecting lines represent the relationships between model families and training. + +# B.4 Types and Analysis of Format Errors + +This section introduces a comprehensive taxonomy of format errors and highlights the importance of addressing these issues in future research. Unlike conventional LLMs, LRMs frequently exhibit format errors. These errors are defined by failing to use a unique end thinking marker (e.g.,) to separate the thinking process from the final answer. Format errors increase the difficulty of distinguishing the thinking process from the final answer and reveal the vulnerability of LRMs in following predefined formats. + +To illustrate this phenomenon, we identify 12 distinct types of response formats produced by LRMs, each assigned a unique ID, as shown in Table 5. 
These 12 types are further grouped into three major categories: + +- Standard-Conforming Responses: These responses meet the expected format by including exactly one end thinking marker (e.g., ) to delimit the thinking process from the final answer. Among these, type ID-100 includes a thinking process, while ID-101 omits it. The proportion of such responses is measured using the S-Corr metric. +- Unreadable Responses: These refer to generation failures, including cases where LRMs produce endlessly thinking content or solely + +produce end thinking markers. The proportion of all other (i.e., readable) responses is measured using the L-Corr metric. + +- Readable but Malformed Responses: These responses deviate from the standard format yet still contain extractable information. In some cases, the final answer is missing (e.g., ID-200, ID-202, ID-205), and we instead evaluate the correctness of the thinking process. In other cases, multiple (e.g., ID-201, ID-203) or unmatched9 (e.g., ID-204, ID-206) end thinking markers are generated. In such instances, we treat the content following the last end thinking marker as the final answer for evaluation. + +Table 14 and Table 15 present the distributions of 12 format types under top-p sampling and greedy sampling, respectively. we find: (1) The infinite generation phenomenon is widespread across most LRMs, particularly concentrated in LRMs with fewer than 32B parameters. (2) The Nemotron and EXAONE families frequently produce correctly formatted responses without any explicit thinking processes. This behavior can be viewed as a mechanism for mitigating over-thinking. However, + +the EXAONE family still exhibits substantial overthinking tendencies, suggesting that LRMs' capability to respond without visible reasoning and their tendency to overthink may be orthogonal characteristics. (3) None of the evaluated LRMs exhibited behaviors classified as ID-205/206. + +
FormatIDmarker (standard)marker (unmatched)marker (number)thinking processfinal answer
Standard100-1
101-1×
Readable but Malformed200-1×
201->1
202->1×
203->1×
204×≥1
205×≥1×
206×≥1×
207××0-
Unreadable300≥1××
301××0-
+ +# C More Experimental Setups & Results + +# C.1 Greedy Sampling Results + +Table 16 presents the performance of LRMs on S1-Bench under greedy sampling. While overall accuracy improves compared to top-p sampling, issues of inefficiency and accuracy degradation on simple questions remain. + +# C.2 Efficiency Analysis across Subcategories. + +Figure 9 illustrates the average response tokens across the 28 subcategories. In the heatmap, both models (rows) and subcategories (columns) are ordered in descending order according to their average number of response tokens. + +# C.3 Solution Analysis Details + +For solution analysis, We only use well-formatted thinking processes with correct final answers, as incorrect answers make it unclear whether LRMs are over-reasoning or under-reasoning, and malformed thinking processes cannot be precisely extracted. The segmentation process is performed by DeepSeek-v3, with prompts detailed in Table 17. We compute the average token count in the first solution round; if no solution is found, we use the token count of the entire thinking process. + +# C.4 Thinking Redundancy Analysis + +We conduct a similarity analysis to analyze how information redundancy in the thinking processes + +changes as reasoning sequences increase. Specifically, we first divide the complete thinking process into k equal-length segments10. Then, we encode each segment using the all-MiniLM-L6-v2 model11. For each segment, we calculate the cosine similarity with all its preceding segments and use the maximum similarity as a measure of its information redundancy. As shown in Figure 10, information redundancy increases across all four main categories as reasoning sequences increase. Sky-T1-32B shows overall lower similarity, which stems from its shorter thinking process, but still demonstrates an upward trend. + +# C.5 Error Analysis Details + +In error analysis, we only use well-formatted samples, as malformed thinking processes cannot be precisely extracted. 
For samples with correct final answers, we categorize them based on whether the thinking process contains explicit incorrect conclusions in intermediate steps. For samples with incorrect final answers, we categorize them based on whether the correct answer is mentioned at least once during reasoning. We use DeepSeek-v3 for categorization, with prompts provided in Table 18. + +# C.6 Gut Moment Analysis Details + +We prompt GPT-4o to classify the initial part of model responses (before the first '\n\n') into four types based on its comment on difficulty: easy, neutral, difficult, and no comment. The prompts for English questions can be seen in Table 19. For Chinese queries, we use the translated version of the prompt in Chinese. In Table 6, we show the most common sentence of all LRMs in each type of "gut moment." + +Table 5: Twelve types of response format. + +
TypeSentenceCount
easy-zh这个问题看起来挺简单的308
easy-enthat seems straightforward36
difficult-zh这个问题看起来有点复杂308
difficult-enpercentages can sometimes be tricky7
neutral-zh这个问题看起来好像不难24
neutral-enHmm, interesting3
+ +Table 6: The most common sentence in each type of "gut moment." + +# D Error Cases + +This section presents several error cases observed in LRMs. See Tables 20, 21, 22, 23, 24, and 25. + +
Paper AbbreviationMATHGSM8KAIMEGPQAAMCMMLUOlympiad-BenchSVAMPLiveCode-BenchCommon-SenseQA
Codi (Shen et al., 2025b)
CISC (Taubenfeld et al., 2025)
CoT-Valve (Ma et al., 2025b)
Dast (Shen et al., 2025a)
ATM (Chen et al., 2024a)
DEER (Yang et al., 2025b)
DPTS (Ding et al., 2025)
Dynasor (Fu et al., 2024)
ESC (Li et al., 2024)
Hawkeye (She et al., 2025)
token complexity (Lee et al., 2025)
INFTYTHINK (Yan et al., 2025)
KIMI K1.5 (Team et al., 2025a)
L1 (Aggarwal and Welleck, 2025)
LightThinker (Zhang et al., 2025a)
LS-Mixture SFT (Yu et al., 2025a)
DSC (Wang et al., 2024)
O1-Pruner (Luo et al., 2025)
MRT (Qu et al., 2025b)
Self-Doubt (Fu et al., 2025)
RASC (Wan et al., 2024)
NoThinking (Ma et al., 2025a)
Retro-Search (Lu et al., 2025)
RSD (Liao et al., 2025)
ST-BoN (Wang et al., 2025b)
Elastic Reasoning (Xu et al., 2025b)
FS-BoN (Munkhbat et al., 2025)
SoT (Aytes et al., 2025)
SpecReason (Pan et al., 2025)
Speculative Thinking (Yang et al., 2025d)
SPIRIT (Cui et al., 2025)
ITC Analysis (Wang et al., 2025a)
Think when needed (Yang et al., 2025c)
THINKPRUNE (Hou et al., 2025)
TALE (Han et al., 2024)
TokenSkip (Xia et al., 2025)
TOPS (Yang et al., 2025e)
efficient reasoning (Arora and Zanette, 2025)
TWT (Xu et al., 2025a)
Z1 (Yu et al., 2025b)
Count28242011866555
+ +Table 7: A total of 40 studies on LRM efficiency before May 2025 were included. Benchmarks that appeared more than four times are listed. + +
cate.subcategoriesExplanation and cases
reasoning questionnumerical reasoningQuestions that require performing basic mathematical operations or solving simple algebraic equations to arrive at a numerical answer. +Case: What's two plus three?
code reasoningQuestions that require tracing through and executing simple code snippets to determine their output or behavior when run in a specific programming environment. +Case: What is the output of the following code when run in Python 3 environment: word = "hello"\nprint(len(word))
set reasoningQuestions that require applying simple syllogistic reasoning to determine whether elements belong to sets based on clearly stated relationships. +Case: All squares are quadrilaterals. A shape is a square, is it a quadrilateral?
temporal reasoningQuestions that require calculating time durations, ages, or future dates by applying simple arithmetic operations to temporal information. +Case: How many minutes equal 120 seconds?
spatial reasoningQuestions that require determining relative positions, directions, or orientations of objects in space based on simple spatial relationships. +Case: If a bird is flying above a tree, where is the tree in relation to the bird?
causal reasoningQuestions that require determining outcomes by applying simple cause-and-effect relationships based on given conditional statements. +Case: If ferromagnetic material is placed in a magnetic field, it will become magnetized. An iron nail was placed next to a strong magnet for some time. Has the nail been magnetized?
natural law reasoningQuestions that require applying basic knowledge of physical laws and natural phenomena to predict simple observable outcomes in everyday scenarios. +Case: Which is faster, an airplane or the propagation of light?
knowledge questiongeometry factsQuestions that require recalling simple and fundamental geometric properties about shapes, angles, and basic geometric figures. +Case: How many angles does a trapezoid have?
geographic factsQuestions that require recalling simple factual information about locations, landmarks, political divisions, celestial bodies, and other basic geographic knowledge. +Case: Which is the largest continent on Earth?
historical factsQuestions that require recalling basic facts about historical events. +Case: Which country first invented paper?
biographical factsQuestions that require recalling basic facts about the identities, achievements, and characteristics of historical figures. +Case: Who proposed the theory of universal gravitation?
measurement unitsQuestions that require recalling simple conversion relationships between standard units of measurement. +Case: How many centimeters equal 1 meter?
scientific notationQuestions that require recalling basic scientific symbols, formulas, and standard units used in scientific communication. +Case: What is the chemical symbol for oxygen?
creative authorshipQuestions that require recalling the creators or originators of notable artistic, literary, musical, and cultural works. +Case: Who is the author of Hamlet?
+ +Table 8: The subcategory descriptions and cases of reasoning questions and knowledge questions. + +
cate.subcategoriesExplanation and cases
instruction followingrepetition constraintsQuestions that require outputting specified characters, words, or phrases a specific number of times according to simple formatting instructions. +Case: Output the number "7" four times, without using separators.
length constraintsQuestions that require generating outputs of a specific length or with a specific number of components based on simple counting constraints. +Case: Output a four-digit number.
character constraintsQuestions that require generating words or numbers that conform to simple specified character patterns or formatting rules. +Case: Output a number that begins with 8.
counting constraintsQuestions that require counting specific characters or elements within a given text or sequence. +Case: Output the number of letter "y" in the word "yes".
transformation constraintsQuestions that require modifying text or numbers according to simple formatting or character substitution rules to produce a transformed output. +Case: Output the word "good" with all letters capitalized directly.
sentence constraintsQuestions that require generating sentences that conform to simple specified content or structural requirements. +Case: Give a sentence that contains the phrase "have lunch" directly.
analysis questionsentiment classificationQuestions that require determining whether simple statements express positive or negative emotions based on the tone and word choice. +Case: Does the sentence "I hate rainy days." express a positive or negative emotion?
named entity recognitionQuestions that require identifying the correct category of named entities (such as people, places, organizations, or time expressions) within simple sentences. +Case: In the sentence "Napoleon died in 1821", is "1821" a time or a place name?
language classificationQuestions that require identifying the language of origin for simple words or phrases based on their characteristic writing systems or common vocabulary. +Case: Is the word "hello" English or Japanese?
topic classificationQuestions that require identifying the primary subject matter or thematic category of simple sentences based on their content and context clues. +Case: Is the topic of the sentence "The stock market rose 2% today" finance or technology?
intent recognitionQuestions that require determining the communicative purpose behind simple utterances or statements based on their phrasing and context. +Case: Is the intention of the sentence "I'm sorry I'm late." to apologize or to blame?
syntax classificationQuestions that require identifying the correct grammatical structure or sentence type of simple expressions based on their form, punctuation, and communicative function. +Case: Is "Close the door!" an imperative sentence or an interrogative sentence?
grammar classificationQuestions that require identifying simple grammatical properties (like tense, voice, or polarity) of sentences based on their structure and verb forms. +Case: Is "The apple was eaten." in active voice or passive voice?
coreference resolutionQuestions that require identifying which entity a pronoun or reference term refers to in simple sentences by tracking relationships between words in the text. +Case: In "My computer is broken, and I need to fix it." What does "it" refer to?
+ +Table 9: The subcategory descriptions and cases of instruction following questions and analysis questions. + +# Prompt for construction workflow for S1-Bench + +# Data Generation Prompt + +Generate 50 pairs of questions and answers in both Chinese and English based on the category's name, definition, and specific simplicity criteria. The following conditions must be satisfied: + +1. Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps. +2. Answers must be unique or easily falsifiable, with no possibility of multiple correct answers. +3. Make the questions as diverse as possible. + +Category Name and Definition: {name_and_defined} + +Specific Simplicity Criteria: {criteria} + +Cases: +## English question: {question_en} +## English Answer: {answer_en} + +```c +## Chinese question: +{question_zh} +## Chinese Answer: +{answer_zh} + +Please generate 50 pairs of Chinese and English questions and answers in the following format: [question]English-question[answer]English-answer[question]Chinese-question[answer]Chinese-answer... + +Start generating: + +# Quality Discrimination Prompt + +Given a question, its answer, and its category, please analyze from the following perspectives as comprehensively as possible: 1. Whether the question belongs to the specified category and meet the Specific Simplicity Criteria. +2. Whether the question is easy, clear, unambiguous, and has an absolutely unique answer. +3. Whether the answer is absolutely correct; if not, what the correct answer should be. +4. Whether the question is similar to other given questions, and if similar, whether more diverse questions can be generated. 
+ +Category Name and Definition: {name_and_defined} + +Specific Simplicity Criteria: {criteria} + +Question and Answer: {question_with_answer} + +Other Questions: {questions_list} + +Begin your analysis, aiming to be as detailed and comprehensive as possible: + +# Difficulty Reduction Prompt + +Given a question and answer that are too complex for the model to answer correctly, you need to further reduce their difficulty while trying to: +- Ensure the question aligns with the Category Name and Definition. +- Ensure the question meets the Specific Simplicity Criteria. +Category Name and Definition: {name_and_defined} +Specific Simplicity Criteria: {criteria} +Question and Answer: {question_with_answer} +The new question and answer: + +Table 10: "Category Name and Definition" refers to the subcategory name and its definition, while Specific Simplicity Criteria refers to the simplicity requirements specific to the main category. + +
ModelModel IDURL
Qwen2.5-7BQwen2.5-7B-Instructhttps://huggingface.co/Qwen/Qwen2.5-7B-Instruct
Llama3.1-8BLlama-3.1-8B-Instructhttps://huggingface.co/meta-llama/Llama-3.1-8B-Instruct
Mistral-8BMinistral-8B-Instruct-2410https://huggingface.co/mistralai/Ministral-8B-Instruct-2410
Gemma2-9Bgemma-2-9b-ithttps://huggingface.co/google/gemma-2-9b-it
Qwen2.5-14BQwen2.5-14B-Instructhttps://huggingface.co/Qwen/Qwen2.5-14B-Instruct
Qwen2.5-32BQwen2.5-32B-Instructhttps://huggingface.co/Qwen/Qwen2.5-32B-Instruct
Qwen2.5-72BQwen2.5-72B-Instructhttps://huggingface.co/Qwen/Qwen2.5-72B-Instruct
Llama3.3-70BLlama-3.3-70B-Instructhttps://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
DeepSeek-v3DeepSeek-V3-0324https://huggingface.co/deepseek-ai/DeepSeek-V3-0324
+ +Table 11: Mapping of LLM abbreviations and IDs used in this paper, with their open-source URLs. + +
Model IDAbbreviationBase ModelAlg.Size
DeepSeek
DeepSeek-R1-Distill-Qwen-1.5BDS-R1-1.5BQwen2.5-Math-1.5BSFT800K
DeepSeek-R1-Distill-Qwen-7BDS-R1-7BQwen2.5-Math-7BSFT800K
DeepSeek-R1-Distill-Llama-8BDS-R1-8BLlama-3.1-8BSFT800K
DeepSeek-R1-Distill-Qwen-14BDS-R1-14BQwen2.5-14BSFT800K
DeepSeek-R1-Distill-Qwen-32BDS-R1-32BQwen2.5-32BSFT800K
DeepSeek-R1-Distill-Llama-70BDS-R1-70BLlama-3.3-70B-InstructSFT800K
DeepSeek-R1DS-R1DeepSeek-V3-0324SFT&RL800K&-
Qwen
QwQ-32BQwQ-32BQwen2.5-32B--
Qwen3-235B-A22BQwen3-A22BQwen3-235B-A22B-BaseSFT&RL-&-
Qwen3-30B-A3BQwen3-A3BQwen3-30B-A3B-BaseSFT&RL-&-
Qwen3-32BQwen3-32BQwen3-32B-BaseSFT&RL-&-
Qwen3-14BQwen3-14BQwen3-14B-BaseSFT&RL-&-
Qwen3-8BQwen3-8BQwen3-8B-BaseSFT&RL-&-
Qwen3-1.7BQwen3-1.7BQwen3-1.7B-BaseSFT&RL-&-
qihoo360
Light-R1-7B-DSL-R1-7B-DSDeepSeek-R1-Distill-Qwen-7BSFT3K
Light-R1-14B-DSL-R1-14B-DSDeepSeek-R1-Distill-Qwen-14BSFT&RL3K&-
Light-R1-32B-DSL-R1-32B-DSDeepSeek-R1-Distill-Qwen-32BSFT3K
Light-R1-32BL-R1-32BQwen2.5-32B-InstructSFT&DPO73K&-
simplescaling
s1.1-7Bs1.1-7BQwen2.5-7B-InstructSFT1K
s1.1-14Bs1.1-14BQwen2.5-14B-InstructSFT1K
s1.1-32Bs1.1-32BQwen2.5-32B-InstructSFT1K
LG AI Research
EXAONE-Deep-2.4BEXAONE-2.4BEXAONE-3.5-2.4B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-7.8BEXAONE-7.8BEXAONE-3.5-7.8B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-32BEXAONE-32BEXAONE-3.5-32B-InstructSFT&DPO&RL1.6M&20K&10K
NVIDIA
Llama-3.1-Nemotron-Nano-8B-v1Nemotron-8BLlama-3.1-8B-InstructSFT&RL-&-
Llama-3.3-Nemotron-Super-49B-v1Nemotron-49BLlama-3.3-70B-InstructSFT&RL-&-
NovaSky
Sky-T1-32B-FlashSky-T1-32BQwen2.5-32B-InstructSFT&SimPO17K&10K
+ +Table 12: The open-source LRMs details evaluated for S1-Bench. + +# Prompt for Correctness Evaluation + +# Evaluation on S1-Bench + +**Question:** + +{question} + +**Model Answer:** + +{model_answer} + +**Ground Truth:** + +{groundtruth} + +Your task is to evaluate whether the model's answer is correct. + +An answer is considered correct as long as it contains the ground truth (regardless of how complex or detailed the description is). + +If there are parenthetical notes after the ground truth, then there may be multiple correct answers. In this case, the given answer is just one example, and any answer that meets the requirements specified in the notes can be considered correct. + +Additionally, some reasonably uncertain supplementary information is also considered appropriate, including more details, possibilities, and expanded discussion. You should focus more on whether the reply contains the correct answer. + +You need to output a standard JSON, providing your explanation of the evaluation in the "explain" field, and giving the evaluation result in the "result" field, where 1 means the answer is correct and 0 means it is incorrect. + +Your action should follow the given format: "explain": "", "result": 0/1 + +Table 13: Prompt for Correctness Evaluation. + +
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.910.000.000.000.000.000.000.000.000.000.000.09
Qwen3-14B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-8B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-1.7B99.810.000.000.000.000.000.000.000.000.000.000.19
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B99.910.000.090.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B99.240.000.000.000.000.000.000.000.000.000.000.76
DS-R1-1.5B97.580.000.000.000.000.000.000.000.000.000.002.42
Sky-T1-32B95.260.000.620.090.190.000.280.000.003.030.000.52
Nemotron-49B66.0733.930.000.000.000.000.000.000.000.000.000.00
Nemotron-8B58.0626.260.000.000.000.090.000.000.0015.020.000.57
L-R1-32B95.070.000.000.000.000.000.810.000.003.030.001.09
L-R1-32B-DS99.810.000.000.000.000.000.000.000.000.000.000.19
L-R1-14B-DS99.190.000.000.000.000.000.000.000.000.000.000.81
L-R1-7B-DS99.670.000.050.050.000.000.000.000.000.000.000.24
s1.1-32B99.530.000.000.050.000.000.000.000.000.000.000.43
s1.1-14B97.390.000.000.140.000.000.240.000.000.000.002.23
s1.1-7B88.960.000.007.960.090.000.000.000.000.090.002.89
EXAONE-32B67.3932.420.000.000.000.000.000.000.000.000.000.19
EXAONE-7.8B65.8332.230.000.000.050.470.000.000.000.140.001.28
EXAONE-2.4B81.4215.830.000.090.000.050.000.000.000.050.002.56
+ +Table 14: Format type rates under top-p sampling. + +
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.760.000.000.000.000.000.000.000.000.000.000.24
Qwen3-14B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-8B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-1.7B99.760.000.000.000.000.000.000.000.000.000.000.24
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B99.760.000.000.000.000.000.000.000.000.000.000.24
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B97.870.000.000.000.000.000.000.000.000.000.002.13
DS-R1-1.5B91.940.000.000.000.000.000.000.000.000.000.008.06
Sky-T1-32B99.290.000.000.000.000.000.000.000.000.470.000.24
Nemotron-49B60.9039.100.000.000.000.000.000.000.000.000.000.00
Nemotron-8B55.2126.780.000.000.000.000.000.000.0016.350.001.66
L-R1-32B85.550.240.000.240.710.240.950.000.006.642.612.84
L-R1-32B-DS99.290.000.000.000.000.000.000.000.000.000.000.71
L-R1-14B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
L-R1-7B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-32B98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-14B95.970.000.000.240.000.000.240.000.000.000.003.55
s1.1-7B87.910.000.006.640.000.000.000.000.000.000.005.45
EXAONE-32B65.8833.890.000.240.000.000.000.000.000.000.000.00
EXAONE-7.8B63.5133.650.000.000.000.240.000.000.000.240.002.37
EXAONE-2.4B78.9115.880.000.000.000.000.000.000.000.000.005.21
+ +Table 15: Format type rates under greedy decoding setting. + +
ModelSizeacc (Loose)acc (Strict)L-Corr ↑S-Corr ↑Tokens ↓
Qwen3-235B-A22B235B100.00100.00100.00100.00702.70
Qwen3-30B-A3B30B100.00100.00100.00100.00636.35
QwQ-32B32B100.00100.00100.00100.00750.41
Qwen3-32B32B99.7699.7699.7699.76673.62
Qwen3-14B14B99.7699.76100.00100.00597.06
Qwen3-8B8B99.7699.76100.00100.00649.45
Qwen3-1.7B1.7B99.5399.5399.7699.76579.01
Hunyuan-T1-100.00100.00100.00100.00541.09
DS-R1671B100.00100.00100.00100.00621.89
DS-R1-70B70B99.7699.76100.00100.00469.78
DS-R1-32B32B100.00100.00100.00100.00428.46
DS-R1-14B14B99.2999.2999.7699.76463.52
DS-R1-8B8B97.6397.3999.7699.53452.11
DS-R1-7B7B94.3194.3197.8797.87436.87
DS-R1-1.5B1.5B76.5476.5491.9491.94473.67
Sky-T1-32B32B99.5399.0599.7699.29157.12
Nemotron-49B49B99.5399.53100.00100.00337.94
Nemotron-8B8B84.6077.7398.3481.99446.62
L-R1-32B32B92.1885.7894.5585.78996.36
L-R1-32B-DS32B99.2999.2999.2999.29528.45
L-R1-14B-DS14B98.8298.8298.8298.82664.28
L-R1-7B-DS7B92.6592.6598.8298.82514.60
s1.1-32B32B98.8298.8298.8298.82983.38
s1.1-14B14B95.9795.5096.4595.97786.30
s1.1-7B7B94.3187.6894.5587.91630.52
EXAONE-32B32B97.6397.39100.0099.76746.89
EXAONE-7.8B7.8B86.7386.4997.6397.16947.92
EXAONE-2.4B2.4B72.9972.9994.7994.791394.72
+ +Table 16: Main results in the greedy decoding setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst. + +![](images/e791da1c4318e09e42d5cbf06c66bdd4c446799ecfda954d440191112ca62b1b.jpg) +Figure 9: Average response token counts on the 28 subcategories, which is the average result of five generations under top-p sampling. + +![](images/78e8d2a639b51786cbf504b87b7b9eb4482dadf5ec8f3a32a8585fe87b9a4491.jpg) +Figure 10: Maximum similarity between each segment and all preceding segments for LRMs across four categories. + +# Prompts for Solution Segmentation + +Task Description: + +Your task is to segment the given Chain of Thought according to the following rules: + +1. Segmentation positions: + +(1) Please identify and extract all sub-reasoning processes from the Chain of Thought that meet the following condition: They explicitly arrive at a conclusion (including cases phrased as questions, e.g., "right?") that is directly consistent with the Ground Truth. Reasoning processes that only indirectly support the Ground Truth or result in partially aligned conclusions should be excluded. +(2) After clearly reaching the conclusion consistent with the Ground Truth, insert the segmentation marker to distinctly separate each qualifying sub-reasoning process. + +2. Output Restriction: + +(1) You should only directly output the segmentation result without adding any additional supplements. +(2) Except for inserting the separator, you must not make any other modifications to the original Chain of Thought, not even minor character-level changes such as punctuation, spacing, or capitalization. In other words, after removing all separators, the output text must match the original Chain of Thought exactly. + +Some cases: + +Case 1: + +Question: + +What is the value of $\mathbf{x}$ if $\mathbf{x}$ plus 3 equals 8? 
+ +Ground Truth: + +5 + +Chain of Thought: + +Okay, so I need to figure out what x is if x plus 3 equals 8. The problem says "x plus 3 equals 8," so in mathematical terms, that would be: $x + 3 = 8$ + +Alright, now I need to solve for $x$ . From what I remember in algebra, the goal is to isolate $x$ on one side of the equation. That means I need to get rid of that "+"3" that's being added to $x$ . To do that, I should perform the opposite operation. Since it's adding 3, the opposite would be subtracting 3. + +So, if I subtract 3 from both sides, that should keep things equal. Let me try that: $x + 3 - 3 = 8 - 3$ + +Simplifying both sides, on the left side, the $+3$ and $-3$ cancel each other out, leaving just $x$ . And on the right side, 8 minus 3 is 5. So that simplifies to: $x = 5$ + +Wait, let me check if that makes sense. If I substitute $\mathbf{x}$ back into the original equation, does it hold true? Let's see: + +Original equation: $x + 3 = 8$ + +Substitute $x = 5: 5 + 3 = 8$ + +5 plus 3 is indeed 8, so that works out. + +Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. So that's consistent with the first method. + +Wait a second, what if the original problem was written differently? The problem says "x plus 3 equals 8." In symbols, that's unambiguous: $x + 3 = 8$ . So no, I think that's right. + +**Final Answer** + +The value of $x$ is $\boxed{5}$ . + +Output: + +Okay, so I need to figure out what x is if x plus 3 equals 8. The problem says "x plus 3 equals 8," so in mathematical terms, that would be: $x + 3 = 8$ + +Alright, now I need to solve for $x$ . From what I remember in algebra, the goal is to isolate $x$ on one side of the equation. That means I need to get rid of that "+"3" that's being added to $x$ . To do that, I should perform the opposite operation. 
Since it's adding 3, the opposite would be subtracting 3. + +So, if I subtract 3 from both sides, that should keep things equal. Let me try that: $x + 3 - 3 = 8 - 3$ + +Simplifying both sides, on the left side, the $+3$ and $-3$ cancel each other out, leaving just x. And on the right side, 8 minus 3 is 5. So that simplifies to: $x = 5 < \text{split}>$ + +Wait, let me check if that makes sense. If I substitute $\mathbf{x}$ back into the original equation, does it hold true? Let's see: + +Original equation: $x + 3 = 8$ + +Substitute $x = 5: 5 + 3 = 8$ + +5 plus 3 is indeed 8, so that works out. + +Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. So that's consistent with the first method. + +Wait a second, what if the original problem was written differently? The problem says "x plus 3 equals 8." In symbols, that's unambiguous: $x + 3 = 8$ . So no, I think that's right. + +**Final Answer** + +The value of x is 5. + +...Other examples are omitted.) + +Eval Target: + +## Question: + +{question} + +Ground Truth: + +{groundtruth} + +Chain of Thought: + +{thinking_process} + +Output: + +Table 17: Prompts for Solution Segmentation. + +# Prompts for Error Analysis + +# Prompts for samples whose final answer is correct + +Task Description: + +You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below. + +Categories: + +1: The CoT ***includes explicit incorrect conclusions*** in intermediate reasoning steps. + +2: The CoT ***doesn't include any explicit incorrect conclusion*** in intermediate reasoning steps. 
+ +Output your evaluation in the following format: + +TheReason: + +[note: Conduct a step-by-step analysis, stating if and where explicit incorrect conclusions occur in the COT.] + +ErrorType: + +[note: Summarize each incorrect conclusion into a specific error type using a phrase of less than 5 words, such as factual inaccuracies, logical fallacies, comprehension mistakes, calculation errors, formatting issues, and so forth, to better conduct further evaluation and analysis. Directly output a Python list, where each element represents the error type of a specific incorrect conclusion in the CoT. If there are no incorrect conclusions, return an empty list.] + +TheCategory: + +[note: Provide your classification based on your analysis using only the number "1" or "2". Do not add any additional text.] + +Question: + +{question} + +Ground Truth: + +{groundtruth} + +COT: + +{thinking_process} + +TheReason: + +# Prompts for samples whose final answer is incorrect + +Task Description: + +You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below. + +Categories: + +1: Regardless of whether the CoT ultimately arrives at the correct final answer or not, ***the correct answer is explicitly mentioned at least once*** within the reasoning steps (even if it is not ultimately adopted). + +2: ***The correct answer is never explicitly mentioned or referenced*** at any point within the reasoning steps. + +Output your evaluation in the following format: + +TheReason: + +[note: Conduct a step-by-step analysis, explicitly stating whether and where a correct answer is mentioned within the reasoning steps.] + +TheCategory: + +[note: Provide your classification based on your analysis using only the number "1" or "2". Do not add any additional text.] 
+ +Question: + +{question} + +Ground Truth: + +{answer} + +COT: + +{thinking_part} + +TheReason: + +Table 18: Prompts for Error Analysis. + +# Prompts for classify the "gut moment" for English questions + +Overall Task Description + +You will be given the beginning portion of a response written by a large language model when answering a question. Your task is to classify the response into one of the following four categories based on the initial comment about **the difficulty of the question**. + +Important: Only consider the $^{**}$ initial comment $^{**}$ on difficulty made in the response. If the model later changes its assessment, please ignore those later revisions—focus solely on the first difficulty-related comment. + +You must assign the response to exactly one of the four categories below: + +Category 1: Initial comment indicates the question is easy. Initial comment includes phrases like "simple," "basic," "straightforward," "common," etc., clearly stating the question is easy. + +Category 2: Initial comment indicates the question is difficult: Initial comment includes phrases like "a bit complex," "somewhat difficult," "challenging," "tricky," etc., clearly stating the question is hard. + +Category 3: Initial comment is neutral or suggests moderate difficulty: Initial comment includes phrases like "interesting," "not hard," "not complex," "fun," etc., without a clear indication of difficulty level. + +Category 4: No comment on difficulty: The response contains no mention of difficulty at all. + +Please output your result in the following format: + +[Explanation of classification][Category (just the number)][Short phrase containing the comment] + +If it falls into Category 4 (no comment), write "None" for the [Short phrase containing the comment] part. + +Examples for each category will be shown below. + +## Category 1 Examples: + +Question: + +What is two plus three? + +Response: + +Hmm, the user is asking what two plus three equals. 
This question seems very simple, though it might be a bit confusing for someone just starting to learn math. First, I need to figure out what the user's intent is. They might be testing my basic calculation ability or genuinely need help solving the problem. + +Output: + +The initial comment describes the question as "very simple"1This question seems very simple + +...(Other examples are omitted.) + +## Category 2 Examples: + +Question: + +What's the next floor after basement level two? + +Response: + +Hmm, this question seems a bit complex, but I'll try to understand it first. The question is "What's the next floor after basement level two?" First, I need to clarify what "basement level two" means. Usually, the first basement is the deepest, and then going up you have ground floor, first floor, and so on. + +Output: + +The initial comment says it's "a bit complex" $<\text{sep}>2$ $<\text{sep}>$ This question seems a bit complex + +...Other examples are omitted.) + +## Category 3 Examples: + +Question: + +"Doctors recommend drinking at least eight glasses of water a day." Is this sentence about health or education? + +Response: + +Hmm, this question seems not complicated, though if you think carefully it might be a bit tricky. First, I need to understand the meaning of the sentence "Doctors recommend drinking at least eight glasses of water a day." + +Output: + +The initial comment says it's "not complicated," even though it later mentions it might be tricky. Classification should follow the initial comment3This question doesn't seem complicated + +...Other examples are omitted.) + +## Category 4 Examples: + +Question: + +All birds have feathers. A sparrow is a bird. Does a sparrow have feathers? + +Response: + +Hmm, the user is asking a question about birds and feathers—specifically, that all birds have feathers, a sparrow is a bird, so does the sparrow have feathers? This looks like a logic reasoning question. 
The user might be learning the basics of logic or biology. + +Output: + +Although the response mentions it's a "logic reasoning question," it contains no evaluation of difficulty4None + +...(Other examples are omitted.) + +Here is the one you need to classify. + +Question: + +{question} + +Response: + +{response} + +Output: + +Table 19: Prompts for classifying the "gut moment" in English questions. + +# Categories: instruction following Model: DeepSeek-R1-Distill-Qwen-7B + +# Question: + +Output the number "7" four times, without using separators. + +# Ground Truth: + +7777 + +# Thinking process: + +To solve the problem of outputting the number "7" four times without using separators, I need to find a way to repeat the digit 7 multiple times without relying on concatenation or other connecting characters. + +One approach is to use mathematical operations that inherently produce the number 7. Addition is a simple and effective operation that can be used to achieve this. By adding 7 to itself multiple times, I can create the desired sequence of numbers. + +Specifically, adding 7 four times will give me $7 + 7 + 7 + 7$ , which equals 28. This method ensures that the number 7 is used exactly four times without the need for any separators. + +Therefore, the solution is to use the addition operation to repeat the number 7 four times and output the result. + +# Final Answer: + +To output the number "7" four times without using separators, you can utilize mathematical operations that inherently produce the number 7 multiple times. One straightforward way is to use addition: + +$$ +[ 7 + 7 + 7 + 7 = 28 ] +$$ + +This expression shows that the number "7" is used four times in a row without any separators, resulting in the total of 28. + +$$ +[ \overline {{7 + 7 + 7 + 7}} ] +$$ + +Table 20: Error Case for LRM. + +# Question: + +输出一个动物的英文单词,并且只有三个字母 + +Output an English word for an animal, and it must be only three letters long. 
+ +# Ground Truth: + +任何一个由三个字母组成的动物名称都可以(例如:cat) + +Any animal name consisting of three letters is acceptable (e.g., cat). + +# Thinking process: + +嗯,用户让我输出一个动物的英文单词,而且必须是三个字母的。首先,我得确定有哪些常见的动物是三个字母的。可能想到的有猫(Cat)、狗(Dog),不过这两个都是三个字母吗?Cat是C-A-T,确实是三个字母,Dog也是D-O-G,三个字母。那这两个应该都可以。不过用户可能需要更多的例子,或者只需要一个?题目里说的是“输出一个”,所以可能只需要一个。 + +不过等等,可能还有其他选项,比如Ant(蚂蚁)、Ape(猿)、Bat(蝙蝠)、Bear(熊)是四个字母,所以不行。Fish是四个字母,Fox是三个字母,F-O-X,对,Fox也是三个字母。还有Lion是四个字母,不行。Tiger也是五个字母。Rat是三个字母,R-A-T。还有Sheep是五个字母,Cow是三个字母,C-O-W。所以可能的选择还挺多的。 + +不过用户可能想要的是最常见的,比如Cat、Dog、Fox、Ant、Rat、Cow这些。需要确认这些单词是否都是正确的动物名称。比如Ant是蚂蚁,没错。Fox是狐狸,Rat是老鼠,Cow是奶牛。这些都是正确的。所以可能需要从这些里面选一个。 + +不过用户可能希望答案只有一个,所以可能需要随机选一个,或者选最典型的。比如Cat和Dog是最常见的宠物,可能更容易被想到。但Fox也是一个不错的选择,可能更有趣一些。或者像Ape,虽然也是三个字母,但可能不如其他常见。 + +另外,用户有没有可能有其他需求?比如是否区分大小写?题目里说是英文单词,通常首字母不大写,除非是专有名词。所以应该用小写,比如cat、dog。不过输出的时候可能需要大写首字母,但题目没有特别说明,可能保持小写即可。 + +再检查一下有没有拼写错误,比如Bat是B-A-T,没错。Rat是R-A-T,正确。Cow是C-O-W,没错。所以这些都可以作为候选答案。现在需要从中选一个作为回答。可能用户希望得到多个例子,但题目要求输出一个,所以可能需要选最典型的。比如Cat或者Dog,因为这两个是最常见的宠物,可能更容易被用户想到。不过也有可能用户希望有变化,比如Fox这样的动物,比较有趣。 + +或者,用户可能是在测试知识库,看看是否知道除了Cat和Dog之外的其他三个字母的动物。比如Ant,虽然蚂蚁很小,但确实是动物。或者Rat,虽然老鼠可能不太受欢迎,但确实属于动物。Cow是家畜,可能也是合理的选择。 + +所以,综合考虑,可能需要给出多个例子,但题目要求输出一个。这时候可能需要选择其中一个,比如Cat,或者Dog,或者Fox。例如,回答“Cat”或者“Dog”都是正确的。不过为了多样性,可能选Fox,因为不是所有人都会立刻想到它,而Cat和Dog比较基础。或者用户可能希望得到不同的答案,所以可以选其中一个,比如“Fox”作为回答。 + +Well, the user asked me to output an English word for an animal, and it must be three letters long. First, I need to figure out which common animals have three letters. I can think of Cat and Dog, but are both of these three letters? Cat is C-A-T, and it is indeed three letters. Dog is D-O-G, also three letters. So both of these should be valid. But the user might need more examples, or just one? The task says "output one," so it might only require one. + +But wait, there could be other options, like Ant, Ape, Bat, and Bear, but Bear has four letters, so it doesn't count. Fish has four letters, too. Fox is three letters, F-O-X, yes, Fox is three letters. 
Lion has four letters, so that doesn't work. Tiger has five letters. Rat is three letters, R-A-T. Sheep has five letters. Cow is three letters, C-O-W. So there are quite a few possibilities. + +However, the user might be looking for the most common ones, like Cat, Dog, Fox, Ant, Rat, and Cow. I need to check if these words are all correct animal names. For example, Ant is an ant, that's right. Fox is a fox, Rat is a rat, Cow is a cow. These are all correct. So we might need to choose one from these. + +But the user may want only one answer, so I might need to pick randomly or choose the most typical one. For instance, Cat and Dog are the most common pets, so they might be easier to think of. But Fox is also a good choice and might be more interesting. Or something like Ape, though it's three letters, might not be as common as the others. + +Also, is there any other requirement from the user? For instance, should I differentiate between uppercase and lowercase? The task says it's an English word, and usually, the first letter isn't capitalized unless it's a proper noun. So, it should be in lowercase, like cat, dog. However, when outputting, the first letter might need to be capitalized, but the task doesn't specify this, so lowercase should be fine. + +Let's double-check for spelling errors: Bat is B-A-T, correct. Rat is R-A-T, correct. Cow is C-O-W, correct. So these can all be candidate answers. Now, I need to choose one as the response. The user might want several examples, but the task asks for only one, so I might need to pick the most typical one. For example, Cat or Dog, since these are the most common pets and might be easier for the user to think of. But Fox is also a good choice, as it's more interesting. + +Or the user might be testing the knowledge base to see if it knows other three-letter animals besides Cat and Dog. For example, Ant, although ants are small, they are indeed animals. 
Or Rat, although rats might not be as popular, they are still animals. Cow is livestock, so that could also be a reasonable choice. + +So, taking everything into account, I might need to give multiple examples, but the task requires one. In that case, I could choose one, like Cat, or Dog, or Fox. For instance, answering "Cat" or "Dog" would both be correct. However, for diversity, I might pick Fox, because not everyone will immediately think of it, whereas Cat and Dog are more basic. Or the user might want a different answer, so I could choose one of them, like "Fox." + +# Final Answer: + +**Wolf** + +Table 21: Error Case for LRM. + +Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B + +# Question: + +In which city is the Sydney Opera House located? + +# Ground Truth: + +# Sydney. + +# Thinking process: + +Okay, so I need to figure out which city the Sydney Opera House is in. I know that Australia has a lot of cities, but I'm pretty sure the Opera House isn't in Sydney. Wait, isn't it in a different place? I think I've heard of a place called Petersham before, maybe that's where it's located. I remember seeing pictures of it in the news once, and they mentioned Petersham as the area where it's situated. But I'm not entirely sure if it's just a suburb or a major city. Maybe I should think about other major cities in Australia. There's Melbourne, which is known as the "Melbourne" city, and then there's Brisbane, which is more on the northern side. Then there's Sydney, which is the capital, but I don't think the Opera House is there. I think it's in a regional area instead of the capital city. So, putting it all together, I'm pretty confident that the Sydney Opera House is located in Petersham, which is a suburb in New South Wales, Australia. But I should double-check to make sure I'm not mixing up the names of places or cities. Maybe I can recall any famous landmarks in Petersham. 
I think there's a big shopping center there called the Petersham Shopping Centre, and I've heard that the Opera House is nearby. Yeah, that makes sense. So, I'm going to go with Petersham as the location. + +# Final Answer: + +The Sydney Opera House is located in Petersham, a suburb in New South Wales, Australia. + +Table 22: Error Case for LRM. + +# Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B + +# Question: + +Who composed *Symphony No. 9 in D minor, Op. 125*? + +# Ground Truth: + +Ludwig van Beethoven + +# Thinking process: + +Alright, so I need to figure out who composed Symphony No. 9 in D minor, Op. 125. Let me start by recalling what I know about classical music composers. I know that there are several famous composers, but I'm trying to remember if it's someone like Beethoven, Mozart, or perhaps someone from a later era. + +Wait, Beethoven wrote many symphonies, but I think he didn't write Symphony No. 9 in D minor. Let me think. Beethoven's 9th symphony is actually in E flat minor, Op. 67, right? So that's not it. Then there's Beethoven's Moonlight Sonata, which is different. + +Mozart composed a lot of piano concertos, but I don't think he wrote a symphony in D minor. His compositions are usually in various key signatures, but D minor isn't one I associate with him. Then there's Schubert, who wrote the famous symphonies, but I don't recall him composing a symphony in D minor either. + +Wait, maybe it's someone else. I've heard of Igor Stravinsky before, but he was more known for his operas like The Rite of Spring. Then there's Paul Simon, but he's a contemporary composer, so probably not. + +Oh, maybe it's someone from the classical era. Let me think about other composers. There's Shostakovich, but I don't think he wrote that. What about someone like Tchaikovsky? No, he wrote symphonies in C minor and F major, but not D minor. + +Hmm, maybe it's someone more obscure. I think I've heard of a composer named Igor Kovalsky. 
Wait, was that the composer of this symphony? I'm not entirely sure, but I think he's associated with writing complex symphonies. Alternatively, could it be someone like Shoenfield? I'm not certain. + +Wait, I think I remember that Igor Kovalsky wrote this symphony. It's sometimes referred to as "The New World" because of its emotional depth and complexity. The symphony is in D minor, Op. 125, and it's known for its intense, dramatic sound. I think that's correct, but I should double-check to make sure it's not another composer. + +Another thought: maybe it's someone from the 19th century. I'm trying to recall if any composers from that era wrote such a symphony. I'm not sure, but I think Igor Kovalsky is the one. He was an American composer, so that makes sense as a modern touch in classical music. + +So, putting it all together, I believe the composer is Igor Kovalsky, and the symphony is known for its dramatic and intense sound. I think that's the correct answer, but I should confirm if there's any other possibility I might be forgetting. + +# Final Answer: + +The composer of Symphony No. 9 in D minor, Op. 125, is Igor Kovalsky. This symphony is often recognized for its dramatic and intense sound, sometimes referred to as "The New World." + +Table 23: Error Case for LRM. + +# Categories: reasoning question Model: s1.1-7B + +# Question: + +1.5加3.5等于几? + +What is 1.5 plus 3.5? + +# Ground Truth: + +5 + +# Thinking process: + +好的,我需要计算1.5加3.5等于多少。首先,这两个数都是小数,处理起来相对简单。我可以在心里快速算一下。 + +首先,1.5加上3.5。可以将它们看成是 $1 + 0.5 + 3 + 0.5$ 。这样就可以先将整数部分相加,再将小数部分相加。 $1 + 3 = 4$ , $0.5 + 0.5 = 1$ 。然后把两部分加起来是 $4 + 1 = 5$ 。所以答案是5。 + +另一种方法是将它们视为货币金额。比如,1.5美元加3.5美元。1.5美元加上3美元是4.5美元,再加上0.5美元就是5美元。这也得到同样的结果。 + +当然,也可以用电子计算器来验证,但我认为手动计算更快。不过,为了确保准确,我再检查一遍。1.5加3.5。10加35等于45,然后小数点一位,所以是4.5。对的, $1.5 + 3.5 = 4.5$ 。或者用竖式计算也可以: + +1.5 + ++3.5 + +4.5 + +对,这样一看也确实是4.5。所以答案应该是4.5。我之前得到的结果是正确的。 + +Alright, I need to calculate what 1.5 plus 3.5 equals. 
First, both of these numbers are decimals, so it's relatively simple to handle. I can quickly calculate it in my head. + +First, 1.5 plus 3.5. You can think of them as $1 + 0.5 + 3 + 0.5$ . This way, I can add the integer parts first, then the decimal parts. $1 + 3 = 4$ , and $0.5 + 0.5 = 1$ . Then, adding the two parts together gives $4 + 1 = 5$ . So, the answer is 5. + +Another way is to treat them as currency amounts. For example, 1.5 dollars plus 3.5 dollars. 1.5 dollars plus 3 dollars equals 4.5 dollars, then adding 0.5 dollars gives 5 dollars. This also leads to the same result. + +Of course, you could use a calculator to verify, but I think doing it manually is quicker. However, to ensure accuracy, I will check again. 1.5 plus 3.5. 10 plus 35 equals 45, then move the decimal point one place, so it's 4.5. Yes, $1.5 + 3.5 = 4.5$ . Or you could use column addition: + +1.5 + ++3.5 + +··· + +4.5 + +Yes, looking at it this way, it indeed adds up to 4.5. So the answer should be 4.5. The result I got earlier is correct. + +# Final Answer: + +答案是 4.5 + +The answer is 4.5 + +Table 24: Error Case for LRM. + +# Categories: analysis question Model: DeepSeek-R1-Distill-Qwen-1.5B + +# Question: + +In the sentence "Napoleon died in 1821", is "1821" a time or a place name? + +# Ground Truth: + +Time. + +# Thinking process: + +Okay, so I have this sentence: "Napoleon died in 1821." I need to figure out whether "1821" is a time or a place name. Hmm, let me think about this step by step. + +First, I know that "Napoleon" is a person, specifically a historical figure. So, if it's the name of a person, then "1821" must be related to their life. I'm pretty sure that "1821" is a year. I remember that Napoleon was born in 1769 and died in 1821. So, 1821 is definitely a year. + +Wait, but what if I'm not sure? Let me consider other possibilities. Sometimes names can be place names or time periods. For example, if someone's name were a city or a country, the number could be a place. 
But in this case, "Napoleon" is a person, so it's more likely a year. + +Also, thinking about the sentence structure, it's written as "Napoleon died in 1821." The structure "in" suggests that 1821 is the place. So, in this context, "in" is an article that indicates the location, which would be a place name. That makes me think that "1821" is indeed a place name, the year. + +I don't think it's a time because time is usually expressed as something like "in the 19th century" or "2023." So, 1821 is specifically a year, not a time period. It's more precise because it's a specific year when Napoleon died. + +So, putting it all together, "Napoleon" is a person, and "in 1821" is indicating the year he died. Therefore, "1821" is a place name, specifically a year. + +# Final Answer: + +In the sentence "Napoleon died in 1821," the number "1821" is a place name, specifically a year. It is used to indicate the year Napoleon died, which is 1821. The use of "in" confirms that it refers to a place, not a time period. Therefore, "1821" is a year. + +Table 25: Error Case for LRM. 
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10368/images/165f30c01295e8eb67f8483f5b0550795a5819862b0e4f5ca8b4d0a0d987a2ac.jpg b/data/2025/2504_10xxx/2504.10368/images/165f30c01295e8eb67f8483f5b0550795a5819862b0e4f5ca8b4d0a0d987a2ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b74cb917cf8c452779b06c680f5701fb7dc233a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/165f30c01295e8eb67f8483f5b0550795a5819862b0e4f5ca8b4d0a0d987a2ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e96185118621c18c035eb25a7606b0438ec7b759d05c5acae6b9846edb9e67a +size 48480 diff --git a/data/2025/2504_10xxx/2504.10368/images/1a981bd1901d5cbde725a3f1aa0862ca5bcdcf4e957d236157126d10111daeb5.jpg b/data/2025/2504_10xxx/2504.10368/images/1a981bd1901d5cbde725a3f1aa0862ca5bcdcf4e957d236157126d10111daeb5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e86aee42b06cb852f8034aaa3a95da1d964496ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/1a981bd1901d5cbde725a3f1aa0862ca5bcdcf4e957d236157126d10111daeb5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0ec39a3da5d8c5226618890122db9dbeb6054ce40d81391d3a7b8414eef4f51 +size 212298 diff --git a/data/2025/2504_10xxx/2504.10368/images/1b8d93b28a0b52a7ea8408d1474e92753f6006374f38926ddc6dfc9956d37ae5.jpg b/data/2025/2504_10xxx/2504.10368/images/1b8d93b28a0b52a7ea8408d1474e92753f6006374f38926ddc6dfc9956d37ae5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ba264b02694b1e606832569312042b78d568835 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/1b8d93b28a0b52a7ea8408d1474e92753f6006374f38926ddc6dfc9956d37ae5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b378d31f137923e3c1fb4b9457ad1b32012206909b2b633cbfa7ec79ed47dfa4 +size 4250 diff --git 
a/data/2025/2504_10xxx/2504.10368/images/1ca33c1ae16b6590af6a367ff5a0eae2b58a329182ae7f749aecd0ce9bf5cb6f.jpg b/data/2025/2504_10xxx/2504.10368/images/1ca33c1ae16b6590af6a367ff5a0eae2b58a329182ae7f749aecd0ce9bf5cb6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fb5f2014160deb94d339dbc3b8c960cf347d415 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/1ca33c1ae16b6590af6a367ff5a0eae2b58a329182ae7f749aecd0ce9bf5cb6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f08654d6df2cba547cf6a15a2040b65ac539ae5c5679849dc99f85e72b4ae3ba +size 337657 diff --git a/data/2025/2504_10xxx/2504.10368/images/1f08c05ec1b6385422750fdb4fe94ea288c5aa32337b466c418dd9e331f629f3.jpg b/data/2025/2504_10xxx/2504.10368/images/1f08c05ec1b6385422750fdb4fe94ea288c5aa32337b466c418dd9e331f629f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18c86a10b9a0d8ff39038e5dcdd5b0887ce02da3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/1f08c05ec1b6385422750fdb4fe94ea288c5aa32337b466c418dd9e331f629f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:057b6aff885e1a4e86745d77362b233a625f9a57a04fef324abbaa22d2b55669 +size 156166 diff --git a/data/2025/2504_10xxx/2504.10368/images/2eeed99fe8acef3f7a03fa508861598b5dc6a384d3646e244b272591a9ff2bfa.jpg b/data/2025/2504_10xxx/2504.10368/images/2eeed99fe8acef3f7a03fa508861598b5dc6a384d3646e244b272591a9ff2bfa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41f37df3daf5e07b8f682b873305ca552ce74641 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/2eeed99fe8acef3f7a03fa508861598b5dc6a384d3646e244b272591a9ff2bfa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82da812763a8983d400b1de608a967f101a031dbd824c9f1b4b53afb0f0181a1 +size 4686 diff --git a/data/2025/2504_10xxx/2504.10368/images/2f0be290628cb975dda0a9c4b4129748f34ac5ba492c7195d0629498b7739b93.jpg 
b/data/2025/2504_10xxx/2504.10368/images/2f0be290628cb975dda0a9c4b4129748f34ac5ba492c7195d0629498b7739b93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91bb3b5af666496030fc5237fff80d8349280fbf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/2f0be290628cb975dda0a9c4b4129748f34ac5ba492c7195d0629498b7739b93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95c45a399aa8726e8f933f0a3f405af4d357bc301ba0ca9629098c023c616e70 +size 156152 diff --git a/data/2025/2504_10xxx/2504.10368/images/3619fa05d9596f3dfac74b2e6f30bf27955c5389fa40972a22f2528776849588.jpg b/data/2025/2504_10xxx/2504.10368/images/3619fa05d9596f3dfac74b2e6f30bf27955c5389fa40972a22f2528776849588.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b08aabf03614c6f6f09bc9f0910b80393652ef6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/3619fa05d9596f3dfac74b2e6f30bf27955c5389fa40972a22f2528776849588.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:765f7458ce7c05e5a670eb1d6caa861aee5ef0eafad5a4785328abd6e3bffb7e +size 30658 diff --git a/data/2025/2504_10xxx/2504.10368/images/36f5f6a81bdd9b531a8ac7893e1e2c26b7fb338fe001e3626140dc3806b85f1b.jpg b/data/2025/2504_10xxx/2504.10368/images/36f5f6a81bdd9b531a8ac7893e1e2c26b7fb338fe001e3626140dc3806b85f1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1001c715c5042bb67ed1304be69d145dcd951c7b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/36f5f6a81bdd9b531a8ac7893e1e2c26b7fb338fe001e3626140dc3806b85f1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffd9f2f16b37a01cde4135515027dcd18e21c2242acb7324641b5e489312cfea +size 75461 diff --git a/data/2025/2504_10xxx/2504.10368/images/459125bdd88435e5e17571c620196788d6db9d5c1a0cd55e63d205d33c29cd05.jpg b/data/2025/2504_10xxx/2504.10368/images/459125bdd88435e5e17571c620196788d6db9d5c1a0cd55e63d205d33c29cd05.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..14fe789f604f809f4f51c5b7231662880d11bedf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/459125bdd88435e5e17571c620196788d6db9d5c1a0cd55e63d205d33c29cd05.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:727d253c20fe1d1e0579a141dbd886197402fb4f9f728b9eedb8ad019c183175 +size 61361 diff --git a/data/2025/2504_10xxx/2504.10368/images/502c944225ec8d466199e1ad359f3b995cee1bf14968dec9ed6c33126c4de2dc.jpg b/data/2025/2504_10xxx/2504.10368/images/502c944225ec8d466199e1ad359f3b995cee1bf14968dec9ed6c33126c4de2dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbee459c62e324dd5a75e7c28257895d05e67457 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/502c944225ec8d466199e1ad359f3b995cee1bf14968dec9ed6c33126c4de2dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20bd85cd0ccb919b61ef43a274303066466eaf98733d0638ba5625d7698bc202 +size 286230 diff --git a/data/2025/2504_10xxx/2504.10368/images/56b5c8368bf0c329ab942c0cfe68ef542389105adef4c16bb5dc358294695d92.jpg b/data/2025/2504_10xxx/2504.10368/images/56b5c8368bf0c329ab942c0cfe68ef542389105adef4c16bb5dc358294695d92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a097e2bdd6d415f263e4a7781551858e227b9620 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/56b5c8368bf0c329ab942c0cfe68ef542389105adef4c16bb5dc358294695d92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba76159d4a989e38ceae317a10c95f685c8dff14ef9b0c7d59fdc73ca72ef2a0 +size 23500 diff --git a/data/2025/2504_10xxx/2504.10368/images/63656eb5bbdbb3adfe97f4b0f590a44d26894e6a8e5d44117cfa384530768d08.jpg b/data/2025/2504_10xxx/2504.10368/images/63656eb5bbdbb3adfe97f4b0f590a44d26894e6a8e5d44117cfa384530768d08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d65b99bc8e82ae7471579ec9974466319817dfa --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10368/images/63656eb5bbdbb3adfe97f4b0f590a44d26894e6a8e5d44117cfa384530768d08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b21583c969baf2317b0aa32f08708037512dc516b105690e40ce5850024eeba +size 108488 diff --git a/data/2025/2504_10xxx/2504.10368/images/67d5ef35d6644f2d755924bd7bebfba1f466e5a08a7281829c2a506289f6614b.jpg b/data/2025/2504_10xxx/2504.10368/images/67d5ef35d6644f2d755924bd7bebfba1f466e5a08a7281829c2a506289f6614b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1211869aa0260ba6fac3c29e2acdfd0bb153c343 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/67d5ef35d6644f2d755924bd7bebfba1f466e5a08a7281829c2a506289f6614b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:854e1e002eefebf526a90bcf44ef4f0b7f07d2b157ff96526eaafdf476c46f37 +size 2278 diff --git a/data/2025/2504_10xxx/2504.10368/images/6ad24dea6df92c46ab5d9036c37f57d52f429e8462dee13df1dc56557b0ac1ba.jpg b/data/2025/2504_10xxx/2504.10368/images/6ad24dea6df92c46ab5d9036c37f57d52f429e8462dee13df1dc56557b0ac1ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef20c5eb78e72f6ddbe8ca99402dfbc912858adc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/6ad24dea6df92c46ab5d9036c37f57d52f429e8462dee13df1dc56557b0ac1ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b7fa1a0ef9ddd0c37dd32cd5a357a8307b5ea7770b03bc5ca68eed63513f614 +size 2062 diff --git a/data/2025/2504_10xxx/2504.10368/images/78e8d2a639b51786cbf504b87b7b9eb4482dadf5ec8f3a32a8585fe87b9a4491.jpg b/data/2025/2504_10xxx/2504.10368/images/78e8d2a639b51786cbf504b87b7b9eb4482dadf5ec8f3a32a8585fe87b9a4491.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8aefbe90457b78b8d2b6b26901cf26d7120390d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/78e8d2a639b51786cbf504b87b7b9eb4482dadf5ec8f3a32a8585fe87b9a4491.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8bf67e495a828d68d74567b26e3f8b4c7226473d3b7e3d721c19c2247fe1399c +size 89414 diff --git a/data/2025/2504_10xxx/2504.10368/images/7a3795cc2e55a9b576ec292b5aa56d5cdc6ea397ea09993746b001df9df3ab87.jpg b/data/2025/2504_10xxx/2504.10368/images/7a3795cc2e55a9b576ec292b5aa56d5cdc6ea397ea09993746b001df9df3ab87.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f94631eed53ecddfabc62c211d6b57c3c46de888 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/7a3795cc2e55a9b576ec292b5aa56d5cdc6ea397ea09993746b001df9df3ab87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:214261b12e3e8dfd7ba6f5ad01f3df9071fe5eec4cccfc0b8ff275370d9b6cf2 +size 66584 diff --git a/data/2025/2504_10xxx/2504.10368/images/80662bbff5d908277978fbb98d9032e60f62b37085981bea22146fedc6fbb295.jpg b/data/2025/2504_10xxx/2504.10368/images/80662bbff5d908277978fbb98d9032e60f62b37085981bea22146fedc6fbb295.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5407a1d613ee25f3e09916b7cf3e85dd43240e02 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/80662bbff5d908277978fbb98d9032e60f62b37085981bea22146fedc6fbb295.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7be74fe24db9ebd9913e6e9d16e2d1dfbb3cbdcf9dcdabb5523dde1e81824b7b +size 40441 diff --git a/data/2025/2504_10xxx/2504.10368/images/82e56a71ee574ab5f5bddac61408570b4b3ccea7a66fd03d2bb855e1f7a38b1f.jpg b/data/2025/2504_10xxx/2504.10368/images/82e56a71ee574ab5f5bddac61408570b4b3ccea7a66fd03d2bb855e1f7a38b1f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4de5df780080206ce4a03c5f5caa0c75a501b27f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/82e56a71ee574ab5f5bddac61408570b4b3ccea7a66fd03d2bb855e1f7a38b1f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d31ea03b4c9014eb5cdd2faba624cd07319477a227e60548b21401f5b9e3f5db +size 227184 diff --git 
a/data/2025/2504_10xxx/2504.10368/images/832fc96c01a1dfa5c068c72fb411254118d8eaa8000d44b0e31f4b129b28d3d1.jpg b/data/2025/2504_10xxx/2504.10368/images/832fc96c01a1dfa5c068c72fb411254118d8eaa8000d44b0e31f4b129b28d3d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c10f4b4f822ff33dbe36cf3d58c698a4db149192 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/832fc96c01a1dfa5c068c72fb411254118d8eaa8000d44b0e31f4b129b28d3d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:581d09510ac9b0e8320ed250aab896088d40ab053e809dc917b0dc4040b41caa +size 317883 diff --git a/data/2025/2504_10xxx/2504.10368/images/9efac38809cbfd25fe32a6229d1c217cbb74c40a384103a7d9451c156c39c7ab.jpg b/data/2025/2504_10xxx/2504.10368/images/9efac38809cbfd25fe32a6229d1c217cbb74c40a384103a7d9451c156c39c7ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..788bd4f6f7acba16a2fb7e44fd1b0829ffd62d86 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/9efac38809cbfd25fe32a6229d1c217cbb74c40a384103a7d9451c156c39c7ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feac5b8fadcea5f6a52af802c2750afefca37ea8fdee8dd38d92ad8cb44ef342 +size 185704 diff --git a/data/2025/2504_10xxx/2504.10368/images/a5c4a6d06d561fec23d5e14de192381c1db489b74d1bad7bd2dc349cd4dde9ae.jpg b/data/2025/2504_10xxx/2504.10368/images/a5c4a6d06d561fec23d5e14de192381c1db489b74d1bad7bd2dc349cd4dde9ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1725b7eb96eba6b021f35a56b22589e18186da0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/a5c4a6d06d561fec23d5e14de192381c1db489b74d1bad7bd2dc349cd4dde9ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f5a446d01fbe9e2503d156227a29972bcb0783fb2e9bc8deca96ede414b920d +size 36908 diff --git a/data/2025/2504_10xxx/2504.10368/images/ce7830741f441d5ce070ca571bc7c8b76adf816c5b619ffa656b9c0daef6fea3.jpg 
b/data/2025/2504_10xxx/2504.10368/images/ce7830741f441d5ce070ca571bc7c8b76adf816c5b619ffa656b9c0daef6fea3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fce5773196c6d14d02619bb90fd03687426e9ba --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/ce7830741f441d5ce070ca571bc7c8b76adf816c5b619ffa656b9c0daef6fea3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8554de2af4ca43faff24e8d05f9f91deaa84d0660b550aec23ede704428fa6e +size 86201 diff --git a/data/2025/2504_10xxx/2504.10368/images/d0c4edc2222d9738a5f4311cff4acb0010d5e1a8ac9defd168dd3f480fe62d03.jpg b/data/2025/2504_10xxx/2504.10368/images/d0c4edc2222d9738a5f4311cff4acb0010d5e1a8ac9defd168dd3f480fe62d03.jpg new file mode 100644 index 0000000000000000000000000000000000000000..409e6005f043496f47c2066d74331d38068f5933 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/d0c4edc2222d9738a5f4311cff4acb0010d5e1a8ac9defd168dd3f480fe62d03.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40d5df609948d5e32191e2d8d21f0dba60ff33e6e90d095d588b202e2e0b97f3 +size 41719 diff --git a/data/2025/2504_10xxx/2504.10368/images/d1f06c5b686490da5727a7d2a7d29bd968a112ef4adbf0a757b669783d69a035.jpg b/data/2025/2504_10xxx/2504.10368/images/d1f06c5b686490da5727a7d2a7d29bd968a112ef4adbf0a757b669783d69a035.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01463f4028b3816cdc63ed55e4e49843a7e5e5be --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/d1f06c5b686490da5727a7d2a7d29bd968a112ef4adbf0a757b669783d69a035.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9828c0be74176d0c3311a78a14f7d4a33129d0d6ba08f5afb7c26d3b7f58a53e +size 34068 diff --git a/data/2025/2504_10xxx/2504.10368/images/d56ce89be3cdd44a24ee6c27fcfae6613fd4bde20dc2301bb2a0b429a48b0392.jpg b/data/2025/2504_10xxx/2504.10368/images/d56ce89be3cdd44a24ee6c27fcfae6613fd4bde20dc2301bb2a0b429a48b0392.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..3d1bfc6ba6b5e52cc6b60b12b73dd552e6aa2fd7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/d56ce89be3cdd44a24ee6c27fcfae6613fd4bde20dc2301bb2a0b429a48b0392.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1265bc0a24ff94854b5bd1c04d982fc3d711c2ca10e1f6485317950716c7cc4 +size 45372 diff --git a/data/2025/2504_10xxx/2504.10368/images/d9840947d8bbf7b1525dab3d32fa42e1b624558bb99254381f1ead27270614bf.jpg b/data/2025/2504_10xxx/2504.10368/images/d9840947d8bbf7b1525dab3d32fa42e1b624558bb99254381f1ead27270614bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91ef2ed6c19bf8d7eb22dbcd6f53503005f7c7bd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/d9840947d8bbf7b1525dab3d32fa42e1b624558bb99254381f1ead27270614bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56ff46964f8ff7bed67c9c302c02ce5b942d7da52a811adcdc9dd2237b6c7795 +size 33643 diff --git a/data/2025/2504_10xxx/2504.10368/images/e102964c50a709822136bdbfe994f39d60087b6ab50cc89332a3509b36706e08.jpg b/data/2025/2504_10xxx/2504.10368/images/e102964c50a709822136bdbfe994f39d60087b6ab50cc89332a3509b36706e08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d60851037c997a0ea8bf86c6afac0b2ae4218858 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/images/e102964c50a709822136bdbfe994f39d60087b6ab50cc89332a3509b36706e08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3cf61b7fabae5788e3172b2567d0ce4baf7dd0b03fb62c5552abf8b069cad0e +size 163068 diff --git a/data/2025/2504_10xxx/2504.10368/images/e791da1c4318e09e42d5cbf06c66bdd4c446799ecfda954d440191112ca62b1b.jpg b/data/2025/2504_10xxx/2504.10368/images/e791da1c4318e09e42d5cbf06c66bdd4c446799ecfda954d440191112ca62b1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1298c9ddc9b7b7ab977e78b0c56e92edf1e1c34 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10368/images/e791da1c4318e09e42d5cbf06c66bdd4c446799ecfda954d440191112ca62b1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe8435dd5f6d0f708d5688e81ebc979ce62c0f7eebe7cfad71a5d6e8dc174715 +size 263148 diff --git a/data/2025/2504_10xxx/2504.10368/layout.json b/data/2025/2504_10xxx/2504.10368/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..379d90da1623331fdb9d417f841f290ed5855415 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10368/layout.json @@ -0,0 +1,22228 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 91, + 75, + 502, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 75, + 502, + 110 + ], + "spans": [ + { + "bbox": [ + 91, + 75, + 502, + 110 + ], + "type": "text", + "content": "S1-Bench: A Simple Benchmark for Evaluating System 1 Thinking Capability of Large Reasoning Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 98, + 127, + 497, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 127, + 497, + 142 + ], + "spans": [ + { + "bbox": [ + 98, + 127, + 497, + 142 + ], + "type": "text", + "content": "Wenyuan Zhang*, Shuaiyi Nie*, Xinghua Zhang, Zefeng Zhang, Tingwen Liu†" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 142, + 460, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 142, + 460, + 156 + ], + "spans": [ + { + "bbox": [ + 131, + 142, + 460, + 156 + ], + "type": "text", + "content": "Institute of Information Engineering, Chinese Academy of Sciences" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 157, + 467, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 157, + 467, + 170 + ], + "spans": [ + { + "bbox": [ + 127, + 157, + 467, + 170 + ], + "type": "text", + "content": "School of Cyber Security, University of Chinese Academy of Sciences" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 159, 
+ 171, + 435, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 171, + 435, + 185 + ], + "spans": [ + { + "bbox": [ + 159, + 171, + 435, + 185 + ], + "type": "text", + "content": "{zhangwenyuan,nieshuaiyi,liutingwen}@iei.ac.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 155, + 220, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 220, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 220, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 243, + 273, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 243, + 273, + 529 + ], + "spans": [ + { + "bbox": [ + 86, + 243, + 273, + 529 + ], + "type": "text", + "content": "We introduce S1-Bench, a novel benchmark designed to evaluate the performance of Large Reasoning Models (LRMs) on simple tasks that favor intuitive system 1 thinking rather than deliberative system 2 reasoning. While LRMs have achieved significant breakthroughs in complex reasoning tasks through explicit chains of thought, their heavy reliance on system 2 thinking may limit their system 1 thinking capabilities. However, there is a lack of an appropriate benchmark for evaluating LRM's system 1 thinking capabilities. To fill this gap, S1-Bench introduces a suite of simple, diverse, and natural questions across multiple domains and languages, specifically designed to assess LRMs' performance on questions more suitable for system 1. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe a gap between their difficulty perception and generation length. Overall, this work paves the way toward dual-system compatibility in the development of LRMs1." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 539, + 154, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 539, + 154, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 539, + 154, + 552 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 65, + 561, + 252, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 561, + 252, + 574 + ], + "spans": [ + { + "bbox": [ + 65, + 561, + 252, + 574 + ], + "type": "text", + "content": "\"Simplicity is the ultimate sophistication.\"" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 193, + 574, + 289, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 574, + 289, + 585 + ], + "spans": [ + { + "bbox": [ + 193, + 574, + 289, + 585 + ], + "type": "text", + "content": "— Leonardo da Vinci" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 591, + 291, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 591, + 291, + 739 + ], + "spans": [ + { + "bbox": [ + 67, + 591, + 291, + 739 + ], + "type": "text", + "content": "Recent advances in Large Reasoning Models (LRMs), notably OpenAI's o1/o3 (OpenAI, 2024) and the DeepSeek-R1 (Guo et al., 2025) series, have propelled the development of Large Language Models (LLMs). Unlike traditional LLMs that exhibit intuitive, heuristic system 1 thinking, LRMs demonstrate deliberate and analytical system 2 reasoning (Qu et al., 2025a; Li et al., 2025b) by explicitly generating external chain-of-thought (COT) (Wei et al., 2022) before producing final answers. Through sophisticated strategies such as" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 312, + 218, + 517, + 363 + ], + "blocks": [ + { + "bbox": [ + 312, + 218, + 517, + 363 + ], + "lines": [ + { + "bbox": [ + 312, + 218, + 517, + 363 + ], + "spans": [ + { + "bbox": [ + 312, + 218, + 517, + 363 + ], + "type": "table", + "html": "
BenchmarkCross DomainRealistic ScenariosMulti-lingualAcc.
AIMEXX6.67
GPQAX24.94
Olympiad-Bench27.94
AMCXX31.88
MATHXX58.30
MMLUX66.27
GSM8KXX87.45
ASDIVXX97.51
GSM8K-zeroXXX77.98
RoR-BenchXXX14.24
S1-Bench (ours)100.00
", + "image_path": "80662bbff5d908277978fbb98d9032e60f62b37085981bea22146fedc6fbb295.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 371, + 525, + 407 + ], + "lines": [ + { + "bbox": [ + 302, + 371, + 525, + 407 + ], + "spans": [ + { + "bbox": [ + 302, + 371, + 525, + 407 + ], + "type": "text", + "content": "Table 1: Characteristics of S1-Bench, with \"Acc.\" representing the average accuracy of four 7-9B LLMs. See Appendix A.1 for more details." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 434, + 525, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 434, + 525, + 502 + ], + "spans": [ + { + "bbox": [ + 302, + 434, + 525, + 502 + ], + "type": "text", + "content": "self-reflection and multi-path exploration (Li et al., 2025a; Yeo et al., 2025), LRMs can achieve strong performance in tasks that require system 2 thinking, including advanced mathematical and competition-level problems (Yang et al., 2025a)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 504, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 504, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 504, + 526, + 775 + ], + "type": "text", + "content": "However, there remains a lack of appropriate benchmarks for evaluating LRMs' system 1 thinking capabilities. Not all real-world problems require system 2 reasoning. The capacity to dynamically identify simple questions and address them efficiently contributes to both resource optimization and improved user satisfaction for LRMs. Nevertheless, current benchmarks either overemphasize difficulty, are simple yet lack domain diversity, or are only not hard for humans but involve unrealistic adversarial designs. Table 1 presents a collection of recent benchmarks aimed at mitigating the overthinking in LRMs (Sui et al., 2025). 
The majority of these benchmarks are of high difficulty. For example, AIME and GPQA (MAA Committees; Rein et al., 2024) achieve less than " + }, + { + "bbox": [ + 302, + 504, + 526, + 775 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 302, + 504, + 526, + 775 + ], + "type": "text", + "content": " accuracy on conventional small LLMs, which are inherently more suitable for system 2 reasoning. Although some simple mathematical benchmarks are easy enough, such as GSM8K and ASDIV (Cobbe et al.," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 253, + 36, + 608 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 253, + 36, + 608 + ], + "spans": [ + { + "bbox": [ + 13, + 253, + 36, + 608 + ], + "type": "text", + "content": "arXiv:2504.10368v3 [cs.CL] 27 May 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 745, + 290, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 745, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 745, + 290, + 774 + ], + "type": "text", + "content": "* denotes equal contribution.† denotes corresponding author. \n1The code and benchmark can be found in https://github.com/WRipple/S1_Bench." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 793, + 299, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 793, + 299, + 803 + ], + "spans": [ + { + "bbox": [ + 294, + 793, + 299, + 803 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 219 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 219 + ], + "type": "text", + "content": "2021; Miao et al., 2021), they often suffer from limited domain variety. 
Furthermore, some tasks that pose little challenge to humans but incorporate adversarial elements tend to lack relevance to realistic scenarios, such as GSM8K-zero (Chiang and Lee, 2024), which includes the correct answer in the questions. Thus, a benchmark to assess the system " + }, + { + "bbox": [ + 67, + 71, + 291, + 219 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 67, + 71, + 291, + 219 + ], + "type": "text", + "content": " thinking capability of LRMs is still lacking, further hindering our understanding of LRMs' cognitive flexibility between the two systems (Ziabari et al., 2025; Qu et al., 2025a)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 221, + 291, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 221, + 291, + 477 + ], + "spans": [ + { + "bbox": [ + 69, + 221, + 291, + 477 + ], + "type": "text", + "content": "To fill this research gap, we introduce the System 1 Thinking Capability Benchmark (S1-Bench), which measures the performance of LRMs across various simple tasks that commonly encountered in real-world applications. S1-Bench has the following three characteristics: (1) Simple. The questions are not hard for humans and can be easily answered by LLMs. LLMs with 7-9B parameters can robustly provide correct answers through direct responses when sampled across multiple temperatures. (2) Diverse. S1-Bench is not limited to simple reasoning problems; it encompasses four major categories and 28 subcategories in two languages (English and Chinese), including reasoning problems, commonsense knowledge, instruction following, and analytical problems. (3) Natural. The questions are clear, without any misleading elements or ambiguities, ensuring they can be answered intuitively." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 479, + 291, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 479, + 291, + 709 + ], + "spans": [ + { + "bbox": [ + 67, + 479, + 291, + 709 + ], + "type": "text", + "content": "We conduct extensive evaluations on S1-Bench across 28 LRMs, yielding the following key findings: (1) Current LRMs exhibit inefficiency and lack system " + }, + { + "bbox": [ + 67, + 479, + 291, + 709 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 67, + 479, + 291, + 709 + ], + "type": "text", + "content": " thinking capabilities across all types of questions, with average output lengths 15.5 times longer than small LLMs on S1-Bench. (2) Despite employing deep reasoning, several LRMs exhibit under-accuracy and limited robustness on simple questions. (3) LRMs exhibit \"gut moment\" at the beginning of some reasoning processes, showing gut feelings about task difficulty. Yet, even when recognizing a question's simplicity, LRMs often fail to produce shorter responses—revealing a gap between their difficulty awareness and generation behavior. These findings emphasize the significant distance LRMs must traverse to become powerful dual-system compatible models." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 709, + 291, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 709, + 291, + 722 + ], + "spans": [ + { + "bbox": [ + 78, + 709, + 291, + 722 + ], + "type": "text", + "content": "Our contributions can be summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 80, + 735, + 291, + 775 + ], + "type": "text", + "content": "- To the best of our knowledge, S1-Bench is the first benchmark to evaluate the system 1 thinking capabilities of LRMs, which paves" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 324, + 71, + 496, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 71, + 496, + 84 + ], + "spans": [ + { + "bbox": [ + 324, + 71, + 496, + 84 + ], + "type": "text", + "content": "the way for dual-system compatibility." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 316, + 85, + 526, + 206 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 316, + 85, + 524, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 85, + 524, + 111 + ], + "spans": [ + { + "bbox": [ + 316, + 85, + 524, + 111 + ], + "type": "text", + "content": "- We introduce a workflow for constructing a simple dataset for system 1 evaluation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 112, + 525, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 112, + 525, + 152 + ], + "spans": [ + { + "bbox": [ + 316, + 112, + 525, + 152 + ], + "type": "text", + "content": "- Extensive experiments reveal the inefficiency, under-accuracy, and limited robustness of LRRMs on simple questions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 153, + 526, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 153, + 526, + 206 + ], + "spans": [ + { + "bbox": [ + 316, + 153, + 526, + 206 + ], + "type": "text", + "content": "- We find that LRMs exhibit \"gut moment\" on simple problems, and reveal a gap between their difficulty perception and generation length." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 303, + 218, + 393, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 218, + 393, + 231 + ], + "spans": [ + { + "bbox": [ + 303, + 218, + 393, + 231 + ], + "type": "text", + "content": "2 Related work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 241, + 448, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 241, + 448, + 253 + ], + "spans": [ + { + "bbox": [ + 303, + 241, + 448, + 253 + ], + "type": "text", + "content": "2.1 Large Reasoning Models" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 259, + 526, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 259, + 526, + 516 + ], + "spans": [ + { + "bbox": [ + 302, + 259, + 526, + 516 + ], + "type": "text", + "content": "Large Reasoning Models (LRMs), characterized by explicitly generating external thinking processes before final answers (Kumar et al., 2025b; Chen et al., 2025a), achieve a paradigm shift from intuitive system 1 thinking to deliberative system 2 reasoning compared to traditional LLMs (Li et al., 2025b; Qu et al., 2025a), thus achieving superior performance on complex tasks. The development of recent LRMs has largely followed two main approaches: large-scale reinforcement learning (RL) and model distillation. Models trained via largescale RL (Guo et al., 2025; Team, 2025b; Team et al., 2025b) leverage reward-based optimization to gradually incentivize deliberative reasoning. 
In contrast, distillation-based LRMs (OpenAI, 2024; Min et al., 2024; Team, 2025a; Ye et al., 2025; Muennighoff et al., 2025; Zhang et al., 2025b) acquire such abilities by transferring structured reasoning patterns from advanced teacher models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 527, + 429, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 527, + 429, + 539 + ], + "spans": [ + { + "bbox": [ + 303, + 527, + 429, + 539 + ], + "type": "text", + "content": "2.2 Limitations of LRRMs" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 544, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 544, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 544, + 526, + 775 + ], + "type": "text", + "content": "While LRMs have shown significant performance gains through deliberate reasoning, rigid adherence to this overly cautious thinking can introduce new limitations. On one hand, intermediate reasoning steps can cause excessive token generation and unnecessary solving attempts (Chen et al., 2024b; Hashemi et al., 2025; Kumar et al., 2025a), even leading to redundancy in the hidden layers (Chen et al., 2024c, 2025b). On the other hand, LRMs' performance can drop in specific contexts like safety scenarios (Jiang et al., 2025) and role-playing (Feng et al., 2025). However, prior studies mainly evaluated LRMs on complex tasks that more suited for deliberative system 2 thinking. Our work examines how deliberative reasoning impacts extremely simple problems better matched to intuition-driven system 1 processing." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 333, + 199 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 333, + 199 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 333, + 199 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 333, + 199 + ], + "type": "image", + "image_path": "d0c4edc2222d9738a5f4311cff4acb0010d5e1a8ac9defd168dd3f480fe62d03.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 92, + 208, + 499, + 222 + ], + "lines": [ + { + "bbox": [ + 92, + 208, + 499, + 222 + ], + "spans": [ + { + "bbox": [ + 92, + 208, + 499, + 222 + ], + "type": "text", + "content": "Figure 1: Construction workflow for S1-Bench and an illustrative example from each major category." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 337, + 68, + 525, + 199 + ], + "blocks": [ + { + "bbox": [ + 337, + 68, + 525, + 199 + ], + "lines": [ + { + "bbox": [ + 337, + 68, + 525, + 199 + ], + "spans": [ + { + "bbox": [ + 337, + 68, + 525, + 199 + ], + "type": "image", + "image_path": "d56ce89be3cdd44a24ee6c27fcfae6613fd4bde20dc2301bb2a0b429a48b0392.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 241, + 138, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 241, + 138, + 254 + ], + "spans": [ + { + "bbox": [ + 67, + 241, + 138, + 254 + ], + "type": "text", + "content": "3 S1-Bench" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 263, + 290, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 263, + 290, + 411 + ], + "spans": [ + { + "bbox": [ + 67, + 263, + 290, + 411 + ], + "type": "text", + "content": "We introduce S1-Bench, a bilingual, multi-domain benchmark designed to evaluate system 1 thinking capability of LRM on extremely simple questions. These questions are easily solvable by traditional LLMs and not hard for humans. S1-Bench, which covers both English and Chinese, is organized into four major categories: reasoning (RSN), knowledge (KNO), instruction following (IF) and analysis (ANA), representing major dimensions commonly employed in LLM capability evaluation (Zheng et al., 2023; Chang et al., 2024)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 412, + 291, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 412, + 291, + 481 + ], + "spans": [ + { + "bbox": [ + 67, + 412, + 291, + 481 + ], + "type": "text", + "content": "This section begins with how simplicity is ensured, then the detailed construction workflow for S1-Bench, and concludes with an overview of the dataset statistics. Figure 1 shows the construction workflow and an illustrative example per category." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 489, + 221, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 489, + 221, + 502 + ], + "spans": [ + { + "bbox": [ + 67, + 489, + 221, + 502 + ], + "type": "text", + "content": "3.1 How to Ensure Simplicity?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 507, + 291, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 291, + 534 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 291, + 534 + ], + "type": "text", + "content": "We ensure questions are simple and suitable for system 1 thinking through the following two aspects." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 542, + 250, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 542, + 250, + 555 + ], + "spans": [ + { + "bbox": [ + 67, + 542, + 250, + 555 + ], + "type": "text", + "content": "3.1.1 A Priori Simplicity Constraints" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 558, + 290, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 558, + 290, + 612 + ], + "spans": [ + { + "bbox": [ + 67, + 558, + 290, + 612 + ], + "type": "text", + "content": "We begin by generating question-answer pairs through collaboration between humans and LLMs. Each pair is required to satisfy both the general and the category-specific simplicity criteria." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 613, + 290, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 290, + 679 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 290, + 679 + ], + "type": "text", + "content": "The general simplicity criteria requires that: (1) Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps. (2) Answers must be unique or easily falsifiable (e.g., providing a three-letter English word)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "type": "text", + "content": "The category-specific simplicity criteria are as follows. RSN: Limited to problems solvable with minimal reasoning or intuition. KNO: Restricted to common knowledge with unique, verifiable answers from sources like Wikipedia. IF: Involve straightforward instructions without strict formatting requirements. ANA: Limited to questions" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 242, + 526, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 242, + 526, + 296 + ], + "spans": [ + { + "bbox": [ + 302, + 242, + 526, + 296 + ], + "type": "text", + "content": "whose answers can be directly inferred from the prompt, such as binary classification. These constraints ensure all questions remain straightforward for human respondents." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 306, + 502, + 318 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 306, + 502, + 318 + ], + "spans": [ + { + "bbox": [ + 302, + 306, + 502, + 318 + ], + "type": "text", + "content": "3.1.2 A Posteriori Simplicity Verification" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 322, + 526, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 322, + 526, + 416 + ], + "spans": [ + { + "bbox": [ + 302, + 322, + 526, + 416 + ], + "type": "text", + "content": "Due to the biases existing between language models and humans (Gallegos et al., 2024), questions that are simple for humans may be difficult for LLMs. Therefore, we introduce additional posteriori verification to ensure that questions are simple enough to be correctly and robustly answered by smaller LLMs from different families." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 428, + 441, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 428, + 441, + 440 + ], + "spans": [ + { + "bbox": [ + 302, + 428, + 441, + 440 + ], + "type": "text", + "content": "3.2 Construction Workflow" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 447, + 525, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 447, + 525, + 554 + ], + "spans": [ + { + "bbox": [ + 302, + 447, + 525, + 554 + ], + "type": "text", + "content": "Subcategory Preparation. To ensure diversity, we refer to the subcategories included in existing benchmarks (e.g., MMLU, IFEval, and GSM8K) and evaluation surveys (Chang et al., 2024) to select, merge, or design subcategories for S1-bench, ensuring that each meets the simplicity requirements. The definition and example question for each subcategory can be found in Appendix A.2." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 556, + 526, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 556, + 526, + 745 + ], + "spans": [ + { + "bbox": [ + 302, + 556, + 526, + 745 + ], + "type": "text", + "content": "Implementation of A Priori Simplicity. First, we use two data generators² to create 100 initial bilingual question-answer pairs for each candidate subcategory. The data generation prompt (see Appdiix A.3) explicitly incorporates the subcategory definitions, along with both the general and category-specific simplicity criteria, while also aiming to ensure diversity in the generated questions. Second, these question-answer pairs are then independently evaluated by three annotators and two quality discriminators³ according to the general and category-specific simplicity criteria (see Appdiix A.3 for prompt of discriminators), resulting in five evaluation outcomes per pair. The three an" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 314, + 754, + 502, + 763 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 754, + 502, + 763 + ], + "spans": [ + { + "bbox": [ + 314, + 754, + 502, + 763 + ], + "type": "text", + "content": "2We select Claude-3.7-Sonnet and Qwen2.5-72B-Instruct." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 764, + 465, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 764, + 465, + 773 + ], + "spans": [ + { + "bbox": [ + 315, + 764, + 465, + 773 + ], + "type": "text", + "content": "3We select GPT-4o and DeepSeek-V3-241226." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 74, + 282, + 162 + ], + "blocks": [ + { + "bbox": [ + 78, + 74, + 282, + 162 + ], + "lines": [ + { + "bbox": [ + 78, + 74, + 282, + 162 + ], + "spans": [ + { + "bbox": [ + 78, + 74, + 282, + 162 + ], + "type": "image", + "image_path": "56b5c8368bf0c329ab942c0cfe68ef542389105adef4c16bb5dc358294695d92.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 175, + 291, + 200 + ], + "lines": [ + { + "bbox": [ + 67, + 175, + 291, + 200 + ], + "spans": [ + { + "bbox": [ + 67, + 175, + 291, + 200 + ], + "type": "text", + "content": "Figure 2: Statistical distribution of token counts for S1-Bench questions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 222, + 290, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 222, + 290, + 301 + ], + "spans": [ + { + "bbox": [ + 67, + 222, + 290, + 301 + ], + "type": "text", + "content": "notators are experienced graduate students familiar with LLMs and well-acquainted with the goals of S1-Bench. Finally, based on these evaluation outcomes, three annotators discuss and collectively decide whether to retain, modify, or discard each question." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 303, + 291, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 303, + 291, + 532 + ], + "spans": [ + { + "bbox": [ + 69, + 303, + 291, + 532 + ], + "type": "text", + "content": "Implementation of A Posteriori Simplicity. 
First, each question obtained from the previous stage is input into the small LLM " + }, + { + "bbox": [ + 69, + 303, + 291, + 532 + ], + "type": "inline_equation", + "content": "\\text{validators}^4" + }, + { + "bbox": [ + 69, + 303, + 291, + 532 + ], + "type": "text", + "content": " with 7~9 B parameters. For each question, we sample 10 answers at three different temperature settings (0, 0.2, and 0.4), resulting in a total of 30 responses per question. These responses are then individually evaluated for correctness using GPT-4o. Second, if all 30 sampled responses are correct, the question is accepted into S1-Bench. Otherwise, the question is returned to the generators, where a difficulty-reduction prompt (see Appendix 10) is applied to simplify it. The simplified questions then undergoes the same subsequent process. Finally, questions fail to meet the full-accuracy criterion (i.e., 30 out of 30 correct) after three rounds of difficulty reduction are excluded from the workflow." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 533, + 291, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 533, + 291, + 587 + ], + "spans": [ + { + "bbox": [ + 67, + 533, + 291, + 587 + ], + "type": "text", + "content": "The final S1-Bench comprises questions that satisfy both human-based a priori simplicity constraints and LLM-based a posteriori simplicity verification." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 597, + 195, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 597, + 195, + 609 + ], + "spans": [ + { + "bbox": [ + 68, + 597, + 195, + 609 + ], + "type": "text", + "content": "3.3 Benchmark Statistics" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 614, + 290, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 614, + 290, + 749 + ], + "spans": [ + { + "bbox": [ + 67, + 614, + 290, + 749 + ], + "type": "text", + "content": "S1-Bench comprises 422 question-answer pairs across four major categories and 28 subcategories, balanced with 220 English and 202 Chinese questions. Figure 2 shows the token length distribution, with questions averaging 14.46 tokens. To ensure that the a posteriori verification process does not introduce simplicity only tailored to the small LLM validator, we evaluate S1-Bench on five additional LLMs and on Qwen3 Family with reasoning modes disabled. As shown, even the 1.7B model achieves" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 304, + 68, + 525, + 224 + ], + "blocks": [ + { + "bbox": [ + 304, + 68, + 525, + 224 + ], + "lines": [ + { + "bbox": [ + 304, + 68, + 525, + 224 + ], + "spans": [ + { + "bbox": [ + 304, + 68, + 525, + 224 + ], + "type": "table", + "html": "
Modelt=0.0t=0.2t=0.4Tokens
Gemma2-9B100.00100.00100.0038.77
Llama3.1-8B100.00100.00100.0042.00
Mistral-8B100.00100.00100.0044.38
Qwen2.5-7B100.00100.00100.0042.81
DeepSeek-v3100.00100.00100.0079.53
Llama3.3-70B100.0099.7699.7653.71
Qwen2.5-14B99.7499.7699.7640.00
Qwen2.5-32B99.9899.9899.9843.17
Qwen2.5-72B100.00100.00100.0044.61
Qwen3-32B (w/o think)100.00100.00100.00103.30
Qwen3-14B (w/o think)100.00100.00100.0086.35
Qwen3-8B (w/o think)100.00100.0099.7690.54
Qwen3-1.7B (w/o think)98.1097.1695.73114.32
", + "image_path": "459125bdd88435e5e17571c620196788d6db9d5c1a0cd55e63d205d33c29cd05.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 232, + 525, + 268 + ], + "lines": [ + { + "bbox": [ + 302, + 232, + 525, + 268 + ], + "spans": [ + { + "bbox": [ + 302, + 232, + 525, + 268 + ], + "type": "text", + "content": "Table 2: Average accuracy (acc@k) and response token count of different LLMs, each sampled 10 times at three temperature settings." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 290, + 465, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 290, + 465, + 302 + ], + "spans": [ + { + "bbox": [ + 302, + 290, + 465, + 302 + ], + "type": "text", + "content": "over " + }, + { + "bbox": [ + 302, + 290, + 465, + 302 + ], + "type": "inline_equation", + "content": "98\\%" + }, + { + "bbox": [ + 302, + 290, + 465, + 302 + ], + "type": "text", + "content": " accuracy at temperature 0." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 312, + 415, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 312, + 415, + 326 + ], + "spans": [ + { + "bbox": [ + 302, + 312, + 415, + 326 + ], + "type": "text", + "content": "4 Main Experiment" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 334, + 499, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 334, + 499, + 348 + ], + "spans": [ + { + "bbox": [ + 302, + 334, + 499, + 348 + ], + "type": "text", + "content": "4.1 Baseline Models and Configurations" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "spans": [ + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "text", + "content": "We evaluated 28 different LRMs, which are explicitly trained to first respond with a thinking process, and then generate a final answer. These LRMs include open-source families, such as DeepSeek (Guo et al., 2025), Qwen (Yang et al., 2025a), Nemotron (Bercovich et al., 2025), LightR1 (Wen et al., 2025), s1.1 (Muennighoff et al., 2025), EXAONE (Research et al., 2025), and SkyT1 (Griggs et al., 2025), as well as closed-source Hunyuan-T1 (Tencent, 2025), spanning from tiny (1.5B) to large (671B) parameter sizes5. Notably, OpenAI's o-series models are not included as they do not disclose thinking processes to users. 
For each model, we consider two sets of generation configurations: Greedy sampling with temperature " + }, + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "text", + "content": "; Top-p sampling with temperature " + }, + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "inline_equation", + "content": "t = 0.6" + }, + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "text", + "content": ", topp=0.95 and sampling size " + }, + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "inline_equation", + "content": "k = 5" + }, + { + "bbox": [ + 301, + 351, + 527, + 609 + ], + "type": "text", + "content": ". Only top-p sampling results are reported in the main text; greedy decoding results are provided in the Appendix C.1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 618, + 420, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 618, + 420, + 629 + ], + "spans": [ + { + "bbox": [ + 302, + 618, + 420, + 629 + ], + "type": "text", + "content": "4.2 Evaluation Metrics" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 301, + 634, + 525, + 757 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 634, + 525, + 757 + ], + "spans": [ + { + "bbox": [ + 301, + 634, + 525, + 757 + ], + "type": "text", + "content": "Format Metrics. To assess the formatting quality of LRM responses, we compute the proportion of responses that satisfy the following two formatting criteria (averaged over 5 runs for top-p sampling). 
S-Corr (Strict Format Correctness Rate): In general, an end thinking marker (e.g., " + }, + { + "bbox": [ + 301, + 634, + 525, + 757 + ], + "type": "inline_equation", + "content": "\\langle" + }, + { + "bbox": [ + 301, + 634, + 525, + 757 + ], + "type": "text", + "content": " /think " + }, + { + "bbox": [ + 301, + 634, + 525, + 757 + ], + "type": "inline_equation", + "content": "\\rangle" + }, + { + "bbox": [ + 301, + 634, + 525, + 757 + ], + "type": "text", + "content": ") is expected to separate the thinking process from the non-empty final answer. S-Corr measures the proportion of responses that satisfy this criterion." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 756, + 290, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 756, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 67, + 756, + 290, + 773 + ], + "type": "text", + "content": "4We select four small LLMs: Qwen2.5-7B, Llama3.1-8B, Mistral8B, and Gemma2-9B. The full model IDs are detailed in Table B.1." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 763, + 465, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 763, + 465, + 774 + ], + "spans": [ + { + "bbox": [ + 314, + 763, + 465, + 774 + ], + "type": "text", + "content": "Model details are presented in Appendix B.1." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 117, + 69, + 477, + 336 + ], + "blocks": [ + { + "bbox": [ + 117, + 69, + 477, + 336 + ], + "lines": [ + { + "bbox": [ + 117, + 69, + 477, + 336 + ], + "spans": [ + { + "bbox": [ + 117, + 69, + 477, + 336 + ], + "type": "table", + "html": "
Model IDSizeLoose FormatStrict FormatL-Corr ↑S-Corr ↑Tokens ↓
pass@1↑acc@k↑pass@1↑acc@k↑
Validator LLMs7-9B100.00100.00100.00100.00--42.00
Qwen3-A22B235B99.9199.7699.9199.76100.00100.00701.65
Qwen3-A3B30B99.9599.7699.9599.76100.00100.00638.40
QwQ-32B32B100.00100.00100.00100.00100.00100.00720.10
Qwen3-32B32B99.9199.5399.9199.5399.9199.91668.69
Qwen3-14B14B99.9599.7699.9599.7699.9599.95582.99
Qwen3-8B8B99.9599.7699.9599.7699.9599.95657.76
Qwen3-1.7B1.7B99.3497.3999.3497.3999.8199.81595.90
Hunyuan-T1-99.9199.5399.9199.53100.00100.00542.31
DS-R1671B100.00100.00100.00100.00100.00100.00646.40
DS-R1-70B70B99.4897.3999.3896.92100.0099.91453.81
DS-R1-32B32B99.7298.8299.7298.82100.00100.00429.91
DS-R1-14B14B99.5797.8799.5797.87100.00100.00475.46
DS-R1-8B8B97.4497.1697.3997.1699.7699.53452.11
DS-R1-7B7B95.2185.7895.2185.7899.2499.24454.55
DS-R1-1.5B1.5B81.4754.5081.4754.5097.5897.58489.54
Sky-T1-32B32B98.8294.7994.8879.6299.4895.26163.00
Nemotron-49B49B99.1597.3999.1597.39100.00100.00362.54
Nemotron-8B8B86.1669.9179.8159.0099.4384.31372.57
L-R1-32B32B97.8791.0094.7479.6298.9195.071095.36
L-R1-32B-DS32B99.5798.1099.5798.1099.8199.81524.12
L-R1-14B-DS14B99.0595.9799.0595.9799.1999.19693.19
L-R1-7B-DS7B94.6483.6594.6483.6599.7699.67496.47
s1.1-32B32B99.5398.3499.4898.1099.5799.53998.00
s1.1-14B14B97.6393.6097.2591.9497.7797.39839.86
s1.1-7B7B96.6888.3988.5863.9897.1188.96711.49
EXAONE-32B32B97.0694.0897.0694.0899.8199.81800.56
EXAONE-7.8B7.8B88.1575.1287.8274.4198.7298.061046.87
EXAONE-2.4B2.4B72.4256.1672.3256.1697.4497.251593.96
", + "image_path": "9efac38809cbfd25fe32a6229d1c217cbb74c40a384103a7d9451c156c39c7ab.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 344, + 525, + 370 + ], + "lines": [ + { + "bbox": [ + 67, + 344, + 525, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 344, + 525, + 370 + ], + "type": "text", + "content": "Table 3: Main results in the top-p sampling setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 390, + 290, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 390, + 290, + 459 + ], + "spans": [ + { + "bbox": [ + 67, + 390, + 290, + 459 + ], + "type": "text", + "content": "L-Corr (Loose Format Correctness Rate): LRMs may occasionally generate responses with endless thinking. L-Corr quantifies the proportion of responses that do not exhibit this failure mode. Detailed format types are given in Appendix B.4." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 476, + 291, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 476, + 291, + 530 + ], + "spans": [ + { + "bbox": [ + 67, + 476, + 291, + 530 + ], + "type": "text", + "content": "Efficiency Metrics. We calculate the average token counts for responses (Tokens) except for those generate endless thinking. Token counts are obtained using the Qwen2.5 tokenizer." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 548, + 291, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 548, + 291, + 737 + ], + "spans": [ + { + "bbox": [ + 67, + 548, + 291, + 737 + ], + "type": "text", + "content": "Accuracy Metrics. We calculate accuracy metrics under both strict and loose formatting requirements, respectively. 
We use GPT-4o as the evaluator to assess the correctness of the responses6, with the evaluation prompt in Appendix B.2. For greedy sampling, we directly calculate the accuracy rate. For top-p sampling, we utilize two metrics: Pass@1 and Acc@k. Pass@1 follows DeepSeekR1 (Guo et al., 2025), and Acc@k is the percentage of questions with all k answers correct. The two metrics use k=5, and their detailed definitions can be found in Appendix B.3. Notably, S-Corr \\ L-Corr represents the upper bound for pass@1 and acc@5 under strict \\ loose formatting requirements." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 390, + 393, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 390, + 393, + 402 + ], + "spans": [ + { + "bbox": [ + 302, + 390, + 393, + 402 + ], + "type": "text", + "content": "4.3 Main Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 411, + 525, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 411, + 525, + 438 + ], + "spans": [ + { + "bbox": [ + 302, + 411, + 525, + 438 + ], + "type": "text", + "content": "Table 3 and Figure 8 presents the main results of LRMs on S1-Bench, revealing two key phenomena." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 450, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 450, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 450, + 526, + 775 + ], + "type": "text", + "content": "LRMs exhibit significantly lower efficiency than LLMs on S1-Bench, and no clear correlation is observed between ART and model size. We observed the following: First, state-of-the-art LRMs, such as DeepSeek-R1 and Qwen3, do not demonstrate a distinct advantage in efficiency. In contrast, Sky-T1-32B, which undergoes specific optimizations to mitigate overthinking using SimPO, achieves the highest efficiency. Second, The L-R1-DS 7B/14B/32B models are further post-trained from the DS-R1-7B/14B/32B models. 
The L-R1-DS models tend to produce longer responses, suggesting that while additional post-training may enhance the model's capability for complex reasoning, it comes at the cost of response efficiency. Finally, the s1.1 models generate considerably longer responses than the DeepSeek-R1-Distilled models. Despite both models being trained solely with SFT to acquire long-COT reasoning ability, the DeepSeek-R1-Distilled models use 800K training samples, while the s1.1 models are trained on only 1K. This discrepancy suggests that the smaller training set may lead to superficial imitation of long reasoning patterns, resulting in verbose thinking on" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 756, + 290, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 756, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 756, + 290, + 775 + ], + "type": "text", + "content": "If a final answer can be isolated, only the final answer is evaluated; otherwise, the entire response is assessed." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 95, + 68, + 498, + 424 + ], + "blocks": [ + { + "bbox": [ + 95, + 68, + 498, + 424 + ], + "lines": [ + { + "bbox": [ + 95, + 68, + 498, + 424 + ], + "spans": [ + { + "bbox": [ + 95, + 68, + 498, + 424 + ], + "type": "table", + "html": "
Model IDSizeS1-Bench-ENS1-Bench-ZHAvg
RSNKNOIFANAAvgRSNKNOIFANAAvg
Gemma2-9B9B74.829.45.352.445.951.619.87.535.131.038.8
Llama3.1-8B8B91.035.412.461.956.044.028.315.218.726.742.0
Qwen2.5-7B7B65.546.36.449.646.550.546.69.836.938.842.8
Mistral-8B8B67.255.58.650.149.647.356.114.829.738.744.4
Column Avg-74.641.68.253.549.548.337.711.830.133.842.0
Sky-T1-32B32B215.8174.198.5233.3194.3125.5125.399.4145.5128.9163.0
Nemotron-49B49B599.7587.6396.5526.1540.4232.9157.3235.5107.8168.8362.5
Nemotron-8B8B561.0585.1458.0303.1462.6369.5326.0288.1166.7273.5372.6
DS-R1-32B32B421.8504.4414.7521.1473.7362.2385.6343.1408.8382.2429.9
DS-R1-8B8B472.2528.9530.7462.7491.2521.9404.4266.2395.5409.4452.1
DS-R1-70B70B464.1501.3378.5536.1484.0450.8450.2328.4416.7420.9453.8
DS-R1-7B7B447.5623.9353.8510.0495.5446.5463.2339.5373.0409.4454.5
DS-R1-14B14B503.7674.7367.3494.2519.0452.0465.4375.3405.8428.0475.5
DS-R1-1.5B1.5B480.8584.7417.4577.2529.1493.0497.4329.8423.1446.0489.5
L-R1-7B-DS7B568.1667.1501.7566.3580.3444.8454.6344.1366.4405.0496.5
L-R1-32B-DS32B574.5706.6647.6632.8636.3431.2367.0377.1418.7402.2524.1
Hunyuan-T1-561.6693.8380.9435.0521.2676.8553.8505.1523.8565.3542.3
Qwen3-14B14B700.4639.5286.2575.0579.8730.4557.2403.1586.0586.5583.0
Qwen3-1.7B1.7B790.4720.6399.9526.2624.6689.8563.6406.4545.9564.7595.9
Qwen3-A3B30B745.0729.3328.1594.8625.7773.7655.8453.7648.6652.2638.4
DS-R1671B786.1723.8711.4529.2672.5727.3638.5607.9533.9617.9646.4
Qwen3-8B8B853.7753.1394.4629.5683.2749.2623.8459.3624.0630.0657.8
Qwen3-32B32B805.7774.2356.9645.5674.7780.2695.2446.6645.3662.1668.7
L-R1-14B-DS14B951.01026.0829.8653.5848.2594.7610.1442.2451.7525.7693.2
Qwen3-A22B235B925.3864.3487.2605.7734.5803.3713.4487.2611.3665.9701.7
s1.1-7B7B1039.5840.81923.2529.4929.9489.6351.31034.3332.4475.6711.5
QwQ-32B32B873.3808.1520.8634.7722.4866.9707.3613.3667.7717.6720.1
EXAONE-32B32B1323.71057.61537.0711.61086.4703.2348.61302.9125.5490.3800.6
s1.1-14B14B871.8746.22233.1708.1960.2654.6546.01512.6579.7710.7839.9
s1.1-32B32B1077.9889.72055.4781.71081.7995.6765.21634.6666.5906.5998.0
EXAONE-7.8B7.8B1498.31398.91775.7882.41303.81410.3497.81633.1205.0767.01046.9
L-R1-32B32B1614.01217.31996.9930.11338.31035.6737.71240.7610.2835.31095.4
EXAONE-2.4B2.4B1927.31426.21200.1825.71320.72469.71622.62471.61511.21898.71594.0
Column Avg-809.1766.0785.1591.5718.4695.8545.9677.9482.0576.7650.3
Improvement-×10.8×18.4×96.0×11.1×14.5×14.4×14.5×57.3×16.0×17.1×15.5
", + "image_path": "502c944225ec8d466199e1ad359f3b995cee1bf14968dec9ed6c33126c4de2dc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 433, + 525, + 470 + ], + "lines": [ + { + "bbox": [ + 67, + 433, + 525, + 470 + ], + "spans": [ + { + "bbox": [ + 67, + 433, + 525, + 470 + ], + "type": "text", + "content": "Table 4: Average response tokens in the top-p sampling setting on the S1-bench across two languages and four main categories. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst. Bold represents the maximum Improvement value for each language." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 491, + 147, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 491, + 147, + 504 + ], + "spans": [ + { + "bbox": [ + 67, + 491, + 147, + 504 + ], + "type": "text", + "content": "simple questions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "text", + "content": "Several LRMs exhibit under-accuracy and limited robustness on simple questions. First, our observations find that, despite employing deep reasoning, most LRMs tend to exhibit lower accuracy on simple questions compared to traditional LLMs. For example, DS-R1-1.5B and EXAONE-2.4B achieve just above " + }, + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "text", + "content": " acc@k. Second, many LRMs struggle with robust correctness in top-p sampling, where acc@k is significantly lower than pass@1. This issue is particularly pronounced in smaller LRMs. 
For instance, DS-R1-1.5B achieved " + }, + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "inline_equation", + "content": "81.47\\%" + }, + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "text", + "content": " pass@1 but only " + }, + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "inline_equation", + "content": "54.50\\%" + }, + { + "bbox": [ + 67, + 515, + 291, + 677 + ], + "type": "text", + "content": " acc@k." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 691, + 187, + 703 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 691, + 187, + 703 + ], + "spans": [ + { + "bbox": [ + 67, + 691, + 187, + 703 + ], + "type": "text", + "content": "5 Efficiency Analysis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 715, + 241, + 729 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 715, + 241, + 729 + ], + "spans": [ + { + "bbox": [ + 67, + 715, + 241, + 729 + ], + "type": "text", + "content": "5.1 Analysis across Question Types" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": "To better understand the efficiency differences of LRRMs across question types, we analyze the average response tokens across 4 main categories, 28" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 491, + 524, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 491, + 524, + 518 + ], + "spans": [ + { + "bbox": [ + 302, + 491, + 524, + 518 + ], + "type": "text", + "content": "subcategories, and two languages. The results are displayed in Table 4 and Appendix C.2." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 524, + 525, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 524, + 525, + 687 + ], + "spans": [ + { + "bbox": [ + 302, + 524, + 525, + 687 + ], + "type": "text", + "content": "LRMs exhibit a substantial increase in response length across all four major categories, 28 subcategories, and two languages. As shown in Table 4, for each of the four major categories, the average response length of LRMs exceeds that of LLMs by more than a factor of ten. Response lengths also increase significantly across all subcategories (see Appendix C.2). This suggests that while LRMs are primarily trained on reasoning data to produce long CoT style responses, this stylistic pattern generalizes well across a wide range of question types." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 687, + 524, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 687, + 524, + 714 + ], + "spans": [ + { + "bbox": [ + 302, + 687, + 524, + 714 + ], + "type": "text", + "content": "Moreover, 23 out of the 28 LRMs produce longer responses to questions in English than Chinese." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 721, + 524, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 721, + 524, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 721, + 524, + 774 + ], + "type": "text", + "content": "LRMs exhibit the most significant increase in ART for instruction following questions and tend to over-exlore when the solution space is vast. 
As shown in Table 4, although small LLMs" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 69, + 286, + 237 + ], + "blocks": [ + { + "bbox": [ + 73, + 69, + 286, + 237 + ], + "lines": [ + { + "bbox": [ + 73, + 69, + 286, + 237 + ], + "spans": [ + { + "bbox": [ + 73, + 69, + 286, + 237 + ], + "type": "image", + "image_path": "7a3795cc2e55a9b576ec292b5aa56d5cdc6ea397ea09993746b001df9df3ab87.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 246, + 291, + 283 + ], + "lines": [ + { + "bbox": [ + 67, + 246, + 291, + 283 + ], + "spans": [ + { + "bbox": [ + 67, + 246, + 291, + 283 + ], + "type": "text", + "content": "Figure 3: (a) Comparison of first round and additional token costs for each LRM. (b) Distribution of solution rounds for each LRM." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 308, + 291, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 291, + 550 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 291, + 550 + ], + "type": "text", + "content": "provide the most concise responses to instruction following questions, LRMs generate dramatically longer outputs—96.0 times longer in English and 57.3 times longer in Chinese than small LLMs. To investigate the cause, we further analyze the subcategories of instruction following questions. As shown in Appendix C.2, average tokens is notably longer in the subcategories of length constraints, character constraints, and sentence constraints. 
These three question types share a similar characteristic: their correctness is easy to verify, but the solution space is vast. We find that, although the model quickly identifies a correct answer, it becomes trapped in the search space, continually exploring alternatives and failing to stop in time. A case can be seen in Table 21. This phenomenon is more pronounced in families with lower efficiency, such as s1.1 and EXAONE." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 568, + 224, + 581 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 568, + 224, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 568, + 224, + 581 + ], + "type": "text", + "content": "5.2 Thinking Solution Analysis" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 590, + 291, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 291, + 724 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 291, + 724 + ], + "type": "text", + "content": "To better understand the causes of inefficiency in LRMs on S1-Bench, we analyze the solution rounds of their thinking processes7. We first use DeepSeek-v3 to segment each thinking process into several solutions, each defined as a point at which LRMs explicitly arrives at a conclusion that matches the correct answer. We then compute the average token counts in the first solution. The detailed experimental setup is provided in Appendix C.3. 
Our analysis reveals the following:" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 308, + 71, + 521, + 184 + ], + "blocks": [ + { + "bbox": [ + 308, + 71, + 521, + 184 + ], + "lines": [ + { + "bbox": [ + 308, + 71, + 521, + 184 + ], + "spans": [ + { + "bbox": [ + 308, + 71, + 521, + 184 + ], + "type": "image", + "image_path": "165f30c01295e8eb67f8483f5b0550795a5819862b0e4f5ca8b4d0a0d987a2ac.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 194, + 526, + 253 + ], + "lines": [ + { + "bbox": [ + 302, + 194, + 526, + 253 + ], + "spans": [ + { + "bbox": [ + 302, + 194, + 526, + 253 + ], + "type": "text", + "content": "Figure 4: Distribution of the thinking process across four categories. FA and TP refer to Final Answer and Thinking Process, respectively. Green bars indicate cases where the final answer is correct, while red bars indicate cases where it is incorrect." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 278, + 526, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 278, + 526, + 359 + ], + "spans": [ + { + "bbox": [ + 302, + 278, + 526, + 359 + ], + "type": "text", + "content": "The token consumed in the first solution of LRMs significantly exceeds that of validator LLMs, as shown in Figure 3 (a). This suggests that LRMs may involve unnecessary reasoning steps in each solution, which could be one of the reasons for their inefficiency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 370, + 526, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 370, + 526, + 574 + ], + "spans": [ + { + "bbox": [ + 302, + 370, + 526, + 574 + ], + "type": "text", + "content": "The primary reason for efficiency gaps between LRMs lies in the number of redundant solution rounds they generate, rather than the token cost in the initial round. 
As shown in Figure 3 (a), although total thinking token counts vary widely across LRMs, their token counts in the initial round are similar and only account for a small fraction of the total. Figure 3 (b) further shows the distribution of solution rounds on S1-Bench, revealing that LRMs with longer thinking processes tend to generate more solution round, and this redundancy greatly increases computational cost. Furthermore, further experiments reveal that the redundancy in the reasoning process gradually increases over time. Appendix C.4 presents the experimental details." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 588, + 400, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 588, + 400, + 602 + ], + "spans": [ + { + "bbox": [ + 302, + 588, + 400, + 602 + ], + "type": "text", + "content": "6 Error Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 301, + 613, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 613, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 301, + 613, + 526, + 775 + ], + "type": "text", + "content": "This section analyzes the errors made in the thinking process. Specifically, we utilize DeepSeekv3 to categorize the responses of LRMs into four cases and compute the corresponding proportions: (1) Final answer correct; thinking process entirely accurate. (2) Final answer correct; thinking process contains intermediate errors. (3) Final answer incorrect; correct answer mentioned in thinking process. (4) Final answer incorrect; correct answer never mentioned in thinking process. The classification details are in Appendix C.5; results are shown in Figure 4. 
Key findings include:" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 739, + 290, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 739, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 739, + 290, + 775 + ], + "type": "text", + "content": "7We only analyze well-formatted thinking processes with correct final answers, as incorrect answers make it unclear whether LRMs are over-reasoning or under-reasoning, and malformed thinking processes cannot be precisely extracted." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 293, + 792, + 300, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 792, + 300, + 802 + ], + "spans": [ + { + "bbox": [ + 293, + 792, + 300, + 802 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 71, + 282, + 237 + ], + "blocks": [ + { + "bbox": [ + 77, + 71, + 282, + 237 + ], + "lines": [ + { + "bbox": [ + 77, + 71, + 282, + 237 + ], + "spans": [ + { + "bbox": [ + 77, + 71, + 282, + 237 + ], + "type": "image", + "image_path": "36f5f6a81bdd9b531a8ac7893e1e2c26b7fb338fe001e3626140dc3806b85f1b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 248, + 291, + 273 + ], + "lines": [ + { + "bbox": [ + 67, + 248, + 291, + 273 + ], + "spans": [ + { + "bbox": [ + 67, + 248, + 291, + 273 + ], + "type": "text", + "content": "Figure 5: Top: Count of \"gut moments\" across models. Bottom: Probability of \"gut moments\" by question type." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 292, + 291, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 292, + 291, + 413 + ], + "spans": [ + { + "bbox": [ + 67, + 292, + 291, + 413 + ], + "type": "text", + "content": "Lower-accuracy LRMs tend to produce less reliable reasoning chains; even when they arrive at the correct final answer, their intermediate steps often contain errors (light green). LRMs with high accuracy (e.g., DS-R1) show almost no flawed reasoning steps, whereas those with lower accuracy (e.g., DS-R1-1.5B) often generate incorrect intermediate conclusions, further indicating that they lack robust reasoning ability." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 421, + 291, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 421, + 291, + 529 + ], + "spans": [ + { + "bbox": [ + 67, + 421, + 291, + 529 + ], + "type": "text", + "content": "Although LRMs sometimes mention the correct answer during reasoning, they may deviate and ultimately produce incorrect final answers (light red). In one case, the LRM initially arrived at the correct answer but undermined it through excessive verification, a case can be seen in Table 24. In another case, the LRM directly denies the correct answer, a case can be seen in Table 23." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 539, + 156, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 539, + 156, + 551 + ], + "spans": [ + { + "bbox": [ + 67, + 539, + 156, + 551 + ], + "type": "text", + "content": "7 Gut Moment" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 560, + 291, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 560, + 291, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 560, + 291, + 719 + ], + "type": "text", + "content": "We observe an intriguing phenomenon on S1-Bench: LRMs sometimes show an early sense of question difficulty before solving, which we call the \"gut moment.\" To explore this phenomenon, we prompt GPT-4o to classify the initial part of model responses (before the first \"\\n\\ntypes based on its comment on difficulty: easy, neutral, difficult, and no comment. Figure 5 presents these classifications and their probabilities across four question types. Experimental details and cases are in Appendix C.6. 
This leads to the following observations:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 722, + 291, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 722, + 291, + 749 + ], + "spans": [ + { + "bbox": [ + 67, + 722, + 291, + 749 + ], + "type": "text", + "content": "First, all LRMs show the \"gut moment\" phenomenon to varying degrees, which is more evident" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 518, + 169 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 518, + 169 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 518, + 169 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 518, + 169 + ], + "type": "image", + "image_path": "a5c4a6d06d561fec23d5e14de192381c1db489b74d1bad7bd2dc349cd4dde9ae.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 181, + 525, + 206 + ], + "lines": [ + { + "bbox": [ + 302, + 181, + 525, + 206 + ], + "spans": [ + { + "bbox": [ + 302, + 181, + 525, + 206 + ], + "type": "text", + "content": "Figure 6: Average response tokens in the easy category vs. all samples. Dots show difference: easy minus all." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 301, + 233, + 526, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 233, + 526, + 410 + ], + "spans": [ + { + "bbox": [ + 301, + 233, + 526, + 410 + ], + "type": "text", + "content": "in the Qwen, DeepSeek, and Light-R1 families and Hunyuan-T1. Second, LRMs show stylistic differences in expressing \"gut moment.\" For example, the Qwen family often views questions as simple, whereas the DeepSeek-distilled models show more diverse difficulty comments. Third, some LRMs show significantly stronger \"gut moment\" in Chinese than in English, such as the Qwen and DeepSeek families, likely due to a higher proportion of Chinese in their training data. 
Finally, the \"gut moment\" is most evident in reasoning questions and rarely appears in analytical questions, except in DeepSeek-distilled models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 413, + 525, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 413, + 525, + 589 + ], + "spans": [ + { + "bbox": [ + 302, + 413, + 525, + 589 + ], + "type": "text", + "content": "To investigate whether the early sense of a question as \"easy\" leads to a corresponding reduction in response length, we compare the average response tokens for questions in the easy category versus all samples. The results are shown in Figure 6. Except for L-R1-32B, other LRMs do not exhibit a noticeable decrease in response length when questions are viewed as \"easy\"; in fact, 21 out of 28 LRMs showed an increase in response length under this condition. This suggests a discrepancy between the LRM's initial sense of difficulty and its generative behavior, the causes and improvements of which warrant further investigation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 610, + 381, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 610, + 381, + 622 + ], + "spans": [ + { + "bbox": [ + 302, + 610, + 381, + 622 + ], + "type": "text", + "content": "8 Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 301, + 640, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 640, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 301, + 640, + 526, + 775 + ], + "type": "text", + "content": "This paper introduces S1-Bench, the first benchmark designed to evaluate system 1 thinking capabilities in LRMs. We conduct extensive evaluations across 28 LRMs, revealing their inefficiency, inadequate accuracy, and limited robustness when handling simple questions. Additionally, we observe \"gut moment\" and find a gap between their difficulty perception and generation length. 
Overall, this work paves the way toward dual-system compatibility in the development of LRMs." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 756, + 290, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 756, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 756, + 290, + 774 + ], + "type": "text", + "content": "8Derived from \"gut feeling,\" meaning intuition-based judgment without analysis." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 94, + 293, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 94, + 293, + 309 + ], + "spans": [ + { + "bbox": [ + 67, + 94, + 293, + 309 + ], + "type": "text", + "content": "Although S1-Bench pioneers the evaluation of system 1 thinking in LRMs, it still has several limitations. First, due to our emphasis on ensuring the uniqueness of each sample during dataset construction—for instance, including only one question for basic arithmetic operations such as addition, subtraction, and multiplication—the overall scale of the benchmark remains limited. As a next step, we plan to expand the scale of S1-Bench. 
Second, while recent months have seen a surge in newly released open-source LRMs, we have only evaluated 28 representative models and have not covered the full spectrum of available models. Lastly, we do not propose methods to improve the efficiency of LRMs on system 1 tasks in this work; this will be the focus of our future research." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 334, + 127, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 334, + 127, + 348 + ], + "spans": [ + { + "bbox": [ + 68, + 334, + 127, + 348 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 354, + 291, + 773 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 69, + 354, + 291, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 291, + 400 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 291, + 400 + ], + "type": "text", + "content": "Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 410, + 175, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 410, + 175, + 421 + ], + "spans": [ + { + "bbox": [ + 69, + 410, + 175, + 421 + ], + "type": "text", + "content": "AI-MO. 2024. Amc 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 432, + 290, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 432, + 290, + 465 + ], + "spans": [ + { + "bbox": [ + 69, + 432, + 290, + 465 + ], + "type": "text", + "content": "Daman Arora and Andrea Zanette. 2025. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 476, + 290, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 476, + 290, + 521 + ], + "spans": [ + { + "bbox": [ + 69, + 476, + 290, + 521 + ], + "type": "text", + "content": "Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. 2025. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 532, + 291, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 532, + 291, + 619 + ], + "spans": [ + { + "bbox": [ + 69, + 532, + 291, + 619 + ], + "type": "text", + "content": "Akhiad Bercovich, Itay Levy, Izik Golan, Mohammad Dabbah, Ran El-Yaniv, Omri Puny, Ido Galil, Zach Moshe, Tomer Ronen, Najeeb Nabwani, Ido Shahaf, Oren Tropp, Ehud Karpas, Ran Zilberstein, Jiaqi Zeng, Soumye Singhal, Alexander Bukharin, Yian Zhang, Tugrul Konuk, and 113 others. 2025. Llamameton: Efficient reasoning models. Preprint, arXiv:2505.00949." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 630, + 291, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 630, + 291, + 698 + ], + "spans": [ + { + "bbox": [ + 69, + 630, + 291, + 698 + ], + "type": "text", + "content": "Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, and 1 others. 2024. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 708, + 291, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 291, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 291, + 773 + ], + "type": "text", + "content": "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 
2025a. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 773 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "type": "text", + "content": "Xiaoshu Chen, Sihang Zhou, Ke Liang, and Xinwang Liu. 2024a. Distilling reasoning ability from large language models with adaptive thinking. arXiv preprint arXiv:2404.09170." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 124, + 526, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 124, + 526, + 190 + ], + "spans": [ + { + "bbox": [ + 304, + 124, + 526, + 190 + ], + "type": "text", + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Quzhi Liu, Mengfei Zhou, Zhuosheng Zhang, and 1 others. 2024b. Do not think that much for " + }, + { + "bbox": [ + 304, + 124, + 526, + 190 + ], + "type": "inline_equation", + "content": "2 + 3 = ?" + }, + { + "bbox": [ + 304, + 124, + 526, + 190 + ], + "type": "text", + "content": " on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 197, + 525, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 197, + 525, + 252 + ], + "spans": [ + { + "bbox": [ + 304, + 197, + 525, + 252 + ], + "type": "text", + "content": "Yilong Chen, Junyuan Shang, Zhengyu Zhang, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2024c. Mixture of hidden-dimensions transformer. arXiv preprint arXiv:2412.05644." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 259, + 525, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 259, + 525, + 326 + ], + "spans": [ + { + "bbox": [ + 304, + 259, + 525, + 326 + ], + "type": "text", + "content": "Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuohuan Wang, Yu Sun, Hua Wu, and Haifeng Wang. 2025b. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 333, + 526, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 333, + 526, + 410 + ], + "spans": [ + { + "bbox": [ + 304, + 333, + 526, + 410 + ], + "type": "text", + "content": "Cheng-Han Chiang and Hung-yi Lee. 2024. Overreasoning and redundant calculation of large language models. In Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers), pages 161-169, St. Julian's, Malta. Association for Computational Linguistics." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 417, + 525, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 417, + 525, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 417, + 525, + 483 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, and 1 others. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 491, + 525, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 525, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 525, + 557 + ], + "type": "text", + "content": "Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, and 1 others. 2025. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 564, + 526, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 564, + 526, + 619 + ], + "spans": [ + { + "bbox": [ + 304, + 564, + 526, + 619 + ], + "type": "text", + "content": "Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, and 1 others. 2025. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 627, + 525, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 627, + 525, + 661 + ], + "spans": [ + { + "bbox": [ + 304, + 627, + 525, + 661 + ], + "type": "text", + "content": "Xiachong Feng, Longxu Dou, and Lingpeng Kong. 2025. Reasoning does not necessarily improve roleplaying ability. arXiv preprint arXiv:2502.16940." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 667, + 526, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 667, + 526, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 667, + 526, + 711 + ], + "type": "text", + "content": "Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. 2024. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "type": "text", + "content": "Yichao Fu, Junda Chen, Yonghao Zhuang, Zheyu Fu, Ion Stoica, and Hao Zhang. 2025. Reasoning without self-doubt: More efficient chain-of-thought through certainty probing. In ICLR 2025 Workshop on Foundation Models in the Wild." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 772 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 137 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 137 + ], + "type": "text", + "content": "Isabel O Gallegos, Ryan A Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K Ahmed. 2024. Bias and fairness in large language models: A survey. Computational Linguistics, 50(3):1097-1179." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 149, + 290, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 149, + 290, + 193 + ], + "spans": [ + { + "bbox": [ + 69, + 149, + 290, + 193 + ], + "type": "text", + "content": "Tyler Griggs, Shiyi Cao, Dacheng Li, Shu Liu, Shishir G. Patil, Matei Zaharia, Joey Gonzalez, and Ion Stoica. 2025. 
Think less, achieve more: Cut reasoning costs by " + }, + { + "bbox": [ + 69, + 149, + 290, + 193 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 69, + 149, + 290, + 193 + ], + "type": "text", + "content": " without sacrificing accuracy." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 204, + 290, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 204, + 290, + 269 + ], + "spans": [ + { + "bbox": [ + 69, + 204, + 290, + 269 + ], + "type": "text", + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 280, + 290, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 280, + 290, + 324 + ], + "spans": [ + { + "bbox": [ + 69, + 280, + 290, + 324 + ], + "type": "text", + "content": "Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. 2024. Token-budget-aware llm reasoning. arXiv preprint arXiv:2412.18547." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 335, + 290, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 290, + 400 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 290, + 400 + ], + "type": "text", + "content": "Masoud Hashemi, Oluwanifemi Bamgp Bose, Sathwik Tejaswi Madhusudhan, Jishnu Sethumadhavan Nair, Aman Tiwari, and Vikas Yadav. 2025. Dna bench: When silence is smarter-benchmarking over-reasoning in reasoning llms. arXiv preprint arXiv:2503.15793." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 412, + 290, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 412, + 290, + 488 + ], + "spans": [ + { + "bbox": [ + 69, + 412, + 290, + 488 + ], + "type": "text", + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, and 1 others. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 500, + 290, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 500, + 290, + 555 + ], + "spans": [ + { + "bbox": [ + 69, + 500, + 290, + 555 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2021a. Measuring massive multitask language understanding. In International Conference on Learning Representations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 565, + 290, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 565, + 290, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 565, + 290, + 620 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 631, + 290, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 631, + 290, + 685 + ], + "spans": [ + { + "bbox": [ + 69, + 631, + 290, + 685 + ], + "type": "text", + "content": "Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. 2025. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. 
arXiv preprint arXiv:2504.01296." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 696, + 290, + 772 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 696, + 290, + 772 + ], + "spans": [ + { + "bbox": [ + 69, + 696, + 290, + 772 + ], + "type": "text", + "content": "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2025. Livecodebench: Holistic and contamination free evaluation of large language models for code. In The Thirteenth International Conference on Learning Representations." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 774 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 304, + 72, + 525, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 525, + 127 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 525, + 127 + ], + "type": "text", + "content": "Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. 2025. Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 135, + 525, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 135, + 525, + 189 + ], + "spans": [ + { + "bbox": [ + 304, + 135, + 525, + 189 + ], + "type": "text", + "content": "Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. 2025a. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, pages arXiv-2502." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 197, + 525, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 197, + 525, + 264 + ], + "spans": [ + { + "bbox": [ + 304, + 197, + 525, + 264 + ], + "type": "text", + "content": "Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025b. Llm post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 271, + 525, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 271, + 525, + 315 + ], + "spans": [ + { + "bbox": [ + 304, + 271, + 525, + 315 + ], + "type": "text", + "content": "Ayeong Lee, Ethan Che, and Tianyi Peng. 2025. How well do llms compress their own chain-of-thought? a token complexity approach. arXiv preprint arXiv:2503.01141." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 322, + 525, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 322, + 525, + 388 + ], + "spans": [ + { + "bbox": [ + 304, + 322, + 525, + 388 + ], + "type": "text", + "content": "Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Eric Tang, Sumanth Hegde, Kourosh Hakhamaneshi, Shishir G Patil, Matei Zaharia, and 1 others. 2025a. Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 396, + 525, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 525, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 525, + 449 + ], + "type": "text", + "content": "Yiwei Li, Peiwen Yuan, Shaoxiong Feng, Boyuan Pan, Xinglin Wang, Bin Sun, Heda Wang, and Kan Li. 2024. 
Escape sky-high cost: Early-stopping self-consistency for multi-step reasoning. arXiv preprint arXiv:2401.10480." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 458, + 525, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 525, + 523 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 525, + 523 + ], + "type": "text", + "content": "Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Ji-axin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, and 1 others. 2025b. From system 1 to system 2: A survey of reasoning large language models. arXiv preprint arXiv:2502.17419." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 532, + 525, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 532, + 525, + 586 + ], + "spans": [ + { + "bbox": [ + 304, + 532, + 525, + 586 + ], + "type": "text", + "content": "Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. 2025. Reward-guided speculative decoding for efficient ltm reasoning. arXiv preprint arXiv:2501.19324." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 593, + 525, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 593, + 525, + 660 + ], + "spans": [ + { + "bbox": [ + 304, + 593, + 525, + 660 + ], + "type": "text", + "content": "Ximing Lu, Seungju Han, David Acuna, Hyunwoo Kim, Jaehun Jung, Shrimai Prabhumoye, Niklas Muennighoff, Mostofa Patwary, Mohammad Shoeybi, Bryan Catanzaro, and 1 others. 2025. Retro-search: Exploring untaken paths for deeper and efficient reasoning. arXiv preprint arXiv:2504.04383." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 667, + 525, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 667, + 525, + 723 + ], + "spans": [ + { + "bbox": [ + 304, + 667, + 525, + 723 + ], + "type": "text", + "content": "Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. 2025. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 729, + 525, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 729, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 729, + 525, + 774 + ], + "type": "text", + "content": "Wenjie Ma, Jingxuan He, Charlie Snell, Tyler Griggs, Sewon Min, and Matei Zaharia. 2025a. Reasoning models can be effective without thinking. arXiv preprint arXiv:2504.09858." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 793, + 304, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 793, + 304, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 793, + 304, + 803 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 773 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "text", + "content": "Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. 2025b. Cot-valve: Length-compressible chain-of-thought tuning. arXiv preprint arXiv:2502.09601." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 125, + 289, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 125, + 289, + 158 + ], + "spans": [ + { + "bbox": [ + 69, + 125, + 289, + 158 + ], + "type": "text", + "content": "MAA Committees. Aime problems and solutions. https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 167, + 289, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 167, + 289, + 210 + ], + "spans": [ + { + "bbox": [ + 69, + 167, + 289, + 210 + ], + "type": "text", + "content": "Shen-Yun Miao, Chao-Chun Liang, and Keh-Yih Su. 2021. A diverse corpus for evaluating and developing english math word problem solvers. arXiv preprint arXiv:2106.15772." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 219, + 289, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 219, + 289, + 285 + ], + "spans": [ + { + "bbox": [ + 69, + 219, + 289, + 285 + ], + "type": "text", + "content": "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, and 1 others. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 294, + 289, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 294, + 289, + 349 + ], + "spans": [ + { + "bbox": [ + 69, + 294, + 289, + 349 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 358, + 289, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 358, + 289, + 402 + ], + "spans": [ + { + "bbox": [ + 69, + 358, + 289, + 402 + ], + "type": "text", + "content": "Tergel Munkhbat, Namgyu Ho, Seo Hyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. 2025. Self-training elicits concise reasoning in large language models. URL https://arxiv.org/abs/2502.20122." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 411, + 289, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 411, + 289, + 443 + ], + "spans": [ + { + "bbox": [ + 69, + 411, + 289, + 443 + ], + "type": "text", + "content": "OpenAI. 2024. Learning to reason with LLMs. https://openai.com/index/learning-to-reason-with-11ms/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 452, + 289, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 452, + 289, + 497 + ], + "spans": [ + { + "bbox": [ + 69, + 452, + 289, + 497 + ], + "type": "text", + "content": "Rui Pan, Yinwei Dai, Zhihao Zhang, Gabriele Oliaro, Zhihao Jia, and Ravi Netravali. 2025. Specreason: Fast and accurate inference-time compute via speculative reasoning. arXiv preprint arXiv:2504.07891." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 506, + 289, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 506, + 289, + 572 + ], + "spans": [ + { + "bbox": [ + 69, + 506, + 289, + 572 + ], + "type": "text", + "content": "Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. 2021. Are nlp models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2080-2094." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 580, + 289, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 580, + 289, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 580, + 289, + 645 + ], + "type": "text", + "content": "Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, and 1 others. 2025a. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 655, + 289, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 655, + 289, + 710 + ], + "spans": [ + { + "bbox": [ + 69, + 655, + 289, + 710 + ], + "type": "text", + "content": "Yuxiao Qu, Matthew YR Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. 2025b. Optimizing test-time compute via meta reinforcement finetuning. arXiv preprint arXiv:2503.07572." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 719, + 289, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 719, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 719, + 289, + 773 + ], + "type": "text", + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2024. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 773 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 137 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 137 + ], + "type": "text", + "content": "LG Research, Kyunghoon Bae, Eunbi Choi, Kibong Choi, Stanley Jungkyu Choi, Yemuk Choi, Seokhee Hong, Junwon Hwang, Hyojin Jeon, Kijeong Jeon, and 1 others. 2025. Exaone deep: Reasoning enhanced language models. arXiv preprint arXiv:2503.12524." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 145, + 524, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 145, + 524, + 190 + ], + "spans": [ + { + "bbox": [ + 304, + 145, + 524, + 190 + ], + "type": "text", + "content": "Jianshu She, Zhuohao Li, Zhemin Huang, Qi Li, Peiran Xu, Haonan Li, and Qirong Ho. 2025. Hawkeye: Efficient reasoning with model collaboration. arXiv preprint arXiv:2504.00424." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 197, + 524, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 197, + 524, + 252 + ], + "spans": [ + { + "bbox": [ + 304, + 197, + 524, + 252 + ], + "type": "text", + "content": "Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. 2025a. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 260, + 524, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 260, + 524, + 304 + ], + "spans": [ + { + "bbox": [ + 304, + 260, + 524, + 304 + ], + "type": "text", + "content": "Zhenyi Shen, Hanqi Yan, Linhai Zhang, Zhanghao Hu, Yali Du, and Yulan He. 2025b. 
Codi: Compressing chain-of-thought into continuous space via self-distillation. arXiv preprint arXiv:2502.21074." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 312, + 524, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 524, + 378 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 524, + 378 + ], + "type": "text", + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and 1 others. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 386, + 524, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 386, + 524, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 386, + 524, + 473 + ], + "type": "text", + "content": "Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. 2019. Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4149-4158." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 481, + 524, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 481, + 524, + 526 + ], + "spans": [ + { + "bbox": [ + 304, + 481, + 524, + 526 + ], + "type": "text", + "content": "Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. 2025. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 533, + 524, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 524, + 577 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 524, + 577 + ], + "type": "text", + "content": "Kimi Team, A Du, B Gao, B Xing, C Jiang, C Chen, C Li, C Xiao, C Du, C Liao, and 1 others. 2025a. Kimi k1. 5: Scaling reinforcement learning with llms. URL https://arxiv.org/abs/2501.12599." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 585, + 524, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 585, + 524, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 585, + 524, + 640 + ], + "type": "text", + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025b. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 648, + 524, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 648, + 524, + 681 + ], + "spans": [ + { + "bbox": [ + 304, + 648, + 524, + 681 + ], + "type": "text", + "content": "NovaSky Team. 2025a. Sky-t1: Train your own o1 preview model within $450. https://novasky-ai.github.io/posts/sky-t1." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 688, + 524, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 688, + 524, + 722 + ], + "spans": [ + { + "bbox": [ + 304, + 688, + 524, + 722 + ], + "type": "text", + "content": "Qwen Team. 2025b. QwQ-32b: Embracing the power of reinforcement learning. https://qwenlm.github. b.io/blog/qwq-32b/." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 729, + 524, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 729, + 524, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 729, + 524, + 773 + ], + "type": "text", + "content": "Tencent. 2025. Reasoning efficiency redefined! meet Tencent's 'hunyuan-t1'—the first mamba-powered ultra-large model. https://llm.hunyuan.tencent.com/#/Blog/hy-t1/." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 793, + 302, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 793, + 302, + 802 + ], + "spans": [ + { + "bbox": [ + 291, + 793, + 302, + 802 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 772 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "text", + "content": "Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. 2024. Dynamic self-consistency: Leveraging reasoning paths for efficient llm sampling. arXiv preprint arXiv:2408.17017." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 127, + 289, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 127, + 289, + 193 + ], + "spans": [ + { + "bbox": [ + 69, + 127, + 289, + 193 + ], + "type": "text", + "content": "Junlin Wang, Shang Zhu, Jon Saad-Falcon, Ben Athiwaratkun, Qingyang Wu, Jue Wang, Shuaiwen Leon Song, Ce Zhang, Bhuwan Dhingra, and James Zou. 2025a. Think deep, think fast: Investigating efficiency of verifier-free inference-time-scaling methods. arXiv preprint arXiv:2504.14047." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 204, + 289, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 204, + 289, + 259 + ], + "spans": [ + { + "bbox": [ + 69, + 204, + 289, + 259 + ], + "type": "text", + "content": "Xinglin Wang, Shaoxiong Feng, Yiwei Li, Peiwen Yuan, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, and Kan Li. 2024. Make every penny count: Difficulty-adaptive self-consistency for cost-efficient reasoning. arXiv preprint arXiv:2408.13457." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 270, + 289, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 270, + 289, + 324 + ], + "spans": [ + { + "bbox": [ + 69, + 270, + 289, + 324 + ], + "type": "text", + "content": "Yiming Wang, Pei Zhang, Siyuan Huang, Baosong Yang, Zhuosheng Zhang, Fei Huang, and Rui Wang. 2025b. Sampling-efficient test-time scaling: Self-estimating the best-of-n sampling in early decoding. arXiv preprint arXiv:2503.01422." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 336, + 289, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 336, + 289, + 400 + ], + "spans": [ + { + "bbox": [ + 69, + 336, + 289, + 400 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 412, + 289, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 412, + 289, + 466 + ], + "spans": [ + { + "bbox": [ + 69, + 412, + 289, + 466 + ], + "type": "text", + "content": "Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. 
Light-R1: Curriculum sft, dpo and rl for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 478, + 289, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 478, + 289, + 521 + ], + "spans": [ + { + "bbox": [ + 69, + 478, + 289, + 521 + ], + "type": "text", + "content": "Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. 2025. Tokenskip: Controllable chain-of-thought compression in llms. arXiv preprint arXiv:2502.12067." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 533, + 289, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 533, + 289, + 587 + ], + "spans": [ + { + "bbox": [ + 69, + 533, + 289, + 587 + ], + "type": "text", + "content": "Jingxian Xu, Mengyu Zhou, Weichang Liu, Hanbing Liu, Shi Han, and Dongmei Zhang. 2025a. Twt: Thinking without tokens by habitual reasoning distillation with multi-teachers' guidance. arXiv preprint arXiv:2503.24198." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 598, + 289, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 598, + 289, + 641 + ], + "spans": [ + { + "bbox": [ + 69, + 598, + 289, + 641 + ], + "type": "text", + "content": "Yuhui Xu, Hanze Dong, Lei Wang, Doyen Sahoo, Junnan Li, and Caiming Xiong. 2025b. Scalable chain of thoughts via elastic reasoning. arXiv preprint arXiv:2505.05315." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 654, + 289, + 707 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 654, + 289, + 707 + ], + "spans": [ + { + "bbox": [ + 69, + 654, + 289, + 707 + ], + "type": "text", + "content": "Yuchen Yan, Yongliang Shen, Yang Liu, Jin Jiang, Mengdi Zhang, Jian Shao, and Yueting Zhuang. 2025. Infty think: Breaking the length limits of long-context reasoning in large language models. arXiv preprint arXiv:2503.06692." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 719, + 289, + 772 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 719, + 289, + 772 + ], + "spans": [ + { + "bbox": [ + 69, + 719, + 289, + 772 + ], + "type": "text", + "content": "An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, and 1 others. 2025a. Qwen3 technical report. arXiv preprint arXiv:2505.09388." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 740 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "text", + "content": "Chenxu Yang, Qingyi Si, Yongjie Duan, Zheliang Zhu, Chenyu Zhu, Zheng Lin, Li Cao, and Weiping Wang. 2025b. Dynamic early exit in reasoning models. arXiv preprint arXiv:2504.15895." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 125, + 524, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 125, + 524, + 158 + ], + "spans": [ + { + "bbox": [ + 304, + 125, + 524, + 158 + ], + "type": "text", + "content": "Junjie Yang, Ke Lin, and Xing Yu. 2025c. Think when you need: Self-adaptive chain-of-thought learning. arXiv preprint arXiv:2504.03234." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 167, + 524, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 167, + 524, + 211 + ], + "spans": [ + { + "bbox": [ + 304, + 167, + 524, + 211 + ], + "type": "text", + "content": "Wang Yang, Xiang Yue, Vipin Chaudhary, and Xiaotian Han. 2025d. Speculative thinking: Enhancing small-model reasoning with large model guidance at inference time. arXiv preprint arXiv:2504.12329." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 220, + 524, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 524, + 263 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 524, + 263 + ], + "type": "text", + "content": "Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. 2025e. Towards thinking-optimal scaling of test-time compute for llm reasoning. arXiv preprint arXiv:2502.18080." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 273, + 524, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 273, + 524, + 306 + ], + "spans": [ + { + "bbox": [ + 304, + 273, + 524, + 306 + ], + "type": "text", + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. LIMO: Less is more for reasoning. arXiv preprint arXiv:2502.03387." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 314, + 524, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 314, + 524, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 314, + 524, + 357 + ], + "type": "text", + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 367, + 524, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 367, + 524, + 422 + ], + "spans": [ + { + "bbox": [ + 304, + 367, + 524, + 422 + ], + "type": "text", + "content": "Bin Yu, Hang Yuan, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. 2025a. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. arXiv preprint arXiv:2505.03469." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 431, + 524, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 431, + 524, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 431, + 524, + 464 + ], + "type": "text", + "content": "Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. 2025b. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 473, + 524, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 524, + 526 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 524, + 526 + ], + "type": "text", + "content": "Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. 2025a. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 536, + 524, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 536, + 524, + 591 + ], + "spans": [ + { + "bbox": [ + 304, + 536, + 524, + 591 + ], + "type": "text", + "content": "Wenyuan Zhang, Tianyun Liu, Mengxiao Song, Xiaodong Li, and Tingwen Liu. 2025b. SOTOPIAΩ: Dynamic strategy injection learning and social instruction following evaluation for social agents. Preprint, arXiv:2502.15538." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 599, + 524, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 599, + 524, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 599, + 524, + 677 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. Judging LLM-as-a-judge with MT-bench and chatbot arena. 
In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 685, + 524, + 740 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 685, + 524, + 740 + ], + "spans": [ + { + "bbox": [ + 304, + 685, + 524, + 740 + ], + "type": "text", + "content": "Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. 2025. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 70, + 249, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 70, + 249, + 97 + ], + "spans": [ + { + "bbox": [ + 68, + 70, + 249, + 97 + ], + "type": "text", + "content": "A More Information of S1-Bench Construction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 106, + 198, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 106, + 198, + 118 + ], + "spans": [ + { + "bbox": [ + 68, + 106, + 198, + 118 + ], + "type": "text", + "content": "A.1 Benchmark Statistics" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 125, + 291, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 125, + 291, + 340 + ], + "spans": [ + { + "bbox": [ + 67, + 125, + 291, + 340 + ], + "type": "text", + "content": "We survey studies on improving the efficiency of LRMs, as there is potential overlap between these 
studies and the technical approaches aimed at enhancing system 1 thinking in LRMs. Table 7 presents the results of our survey. We compile the benchmarks used in these studies for evaluation, that are typically used to verify whether models achieve efficiency improvements. Benchmarks that appear more than four times include: MATH500 (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), AIME24/25 (MAA Committees), GPQA (Rein et al., 2024), AMC23 (AI-MO, 2024), MMLU (Hendrycks et al., 2021a), Olympiad-Bench (He et al., 2024), SVAMP (Patel et al., 2021), LiveCodeBench (Jain et al., 2025), and CommonSenseQA (Talmor et al., 2019)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 342, + 291, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 342, + 291, + 396 + ], + "spans": [ + { + "bbox": [ + 67, + 342, + 291, + 396 + ], + "type": "text", + "content": "The accuracy shown in Table 1 is the average result of the four models, Qwen2.5-7B, Llama3.1-8B, Mistral-8B, and Gemma2-9B, at temperature 0, using GPT-4o as the evaluator." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 406, + 221, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 406, + 221, + 419 + ], + "spans": [ + { + "bbox": [ + 68, + 406, + 221, + 419 + ], + "type": "text", + "content": "A.2 Subcategories in S1-Bench" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 424, + 291, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 424, + 291, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 424, + 291, + 464 + ], + "type": "text", + "content": "Figure 7 shows the pie chart distribution of 28 subcategories in S1-Bench. For more details on the subcategories, please refer to Table 8,9." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 475, + 258, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 475, + 258, + 487 + ], + "spans": [ + { + "bbox": [ + 68, + 475, + 258, + 487 + ], + "type": "text", + "content": "A.3 Prompt for S1-Bench construction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 493, + 291, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 493, + 291, + 558 + ], + "spans": [ + { + "bbox": [ + 67, + 493, + 291, + 558 + ], + "type": "text", + "content": "This section presents the prompts used in the construction of S1-Bench, including the Initial Generation prompt, the Discriminating Generation Quality prompt, and the Reduce Difficulty prompt. See Table 10 for details." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 571, + 289, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 571, + 289, + 585 + ], + "spans": [ + { + "bbox": [ + 68, + 571, + 289, + 585 + ], + "type": "text", + "content": "B Baseline Models and Evaluation Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 593, + 204, + 606 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 593, + 204, + 606 + ], + "spans": [ + { + "bbox": [ + 68, + 593, + 204, + 606 + ], + "type": "text", + "content": "B.1 Baseline Model Details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 611, + 291, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 291, + 678 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 291, + 678 + ], + "type": "text", + "content": "Table 11 presents the abbreviations, IDs, and URLs of LLMs used in this paper. Table 12 displays the abbreviations, IDs, URLs, organizations, training algorithms, and training data volumes of open-source LRMs evaluated in this study." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 689, + 246, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 689, + 246, + 702 + ], + "spans": [ + { + "bbox": [ + 68, + 689, + 246, + 702 + ], + "type": "text", + "content": "B.2 GPT-4o and Human Evaluation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": "We use GPT-4o as the evaluator to assess the correctness of the responses. If a final answer can be isolated, only the final answer is evaluated; otherwise, the entire response is assessed. The evaluation prompt is provided in Table 13." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 325, + 71, + 504, + 251 + ], + "blocks": [ + { + "bbox": [ + 325, + 71, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 325, + 71, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 325, + 71, + 504, + 251 + ], + "type": "image", + "image_path": "d9840947d8bbf7b1525dab3d32fa42e1b624558bb99254381f1ead27270614bf.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 262, + 525, + 299 + ], + "lines": [ + { + "bbox": [ + 302, + 262, + 525, + 299 + ], + "spans": [ + { + "bbox": [ + 302, + 262, + 525, + 299 + ], + "type": "text", + "content": "Figure 7: S1-Bench Category Display. The inner circle represents four major categories, and the outer circle includes 28 subcategories." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 319, + 526, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 319, + 526, + 535 + ], + "spans": [ + { + "bbox": [ + 302, + 319, + 526, + 535 + ], + "type": "text", + "content": "To evaluate the consistency between the GPT-4 judge's assessments and human judgments, we conduct a comprehensive human evaluation study involving three of the authors. Specifically, we randomly sample 20 question-answer pairs from each model's greedy decoding results, resulting in a dataset of 640 pairs derived from 32 models (including 4 verifier LLMs and 28 LRMs). The questions, reference answers, and model responses are then presented to three annotators, who independently judge the correctness of each model response. The final human evaluation results are determined through majority voting. Ultimately, the Cohen's Kappa between the human evaluators and the GPT-4 judge is calculated to be 0.83, indicating an exceptionally high level of agreement." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 544, + 450, + 557 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 544, + 450, + 557 + ], + "spans": [ + { + "bbox": [ + 302, + 544, + 450, + 557 + ], + "type": "text", + "content": "B.3 Accuracy Metrics Details" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 562, + 525, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 562, + 525, + 616 + ], + "spans": [ + { + "bbox": [ + 302, + 562, + 525, + 616 + ], + "type": "text", + "content": "Pass@1: Followed DeepSeek-R1 (Guo et al., 2025), we calculate pass@1 to assess the percentage of correct responses among the " + }, + { + "bbox": [ + 302, + 562, + 525, + 616 + ], + "type": "inline_equation", + "content": "k = 5" + }, + { + "bbox": [ + 302, + 562, + 525, + 616 + ], + "type": "text", + "content": " generations. 
Specifically, it is defined as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 367, + 624, + 525, + 661 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 624, + 525, + 661 + ], + "spans": [ + { + "bbox": [ + 367, + 624, + 525, + 661 + ], + "type": "interline_equation", + "content": "\\text {p a s s} @ 1 = \\frac {1}{k} \\sum_ {i = 1} ^ {k} p _ {i}, \\tag {1}", + "image_path": "2eeed99fe8acef3f7a03fa508861598b5dc6a384d3646e244b272591a9ff2bfa.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 668, + 525, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 668, + 525, + 734 + ], + "spans": [ + { + "bbox": [ + 302, + 668, + 525, + 734 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 668, + 525, + 734 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 302, + 668, + 525, + 734 + ], + "type": "text", + "content": " is the correctness of the i-th generation. Acc@k: Since S1-Bench is composed of extremely simple questions, we calculate acc@k. Specifically, acc@k=1 if all k responses are correct and acc@k = 0 otherwise. 
It is defined as:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 376, + 741, + 525, + 777 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 741, + 525, + 777 + ], + "spans": [ + { + "bbox": [ + 376, + 741, + 525, + 777 + ], + "type": "interline_equation", + "content": "\\operatorname {a c c} @ \\mathrm {k} = \\prod_ {i = 1} ^ {k} p _ {i}, \\tag {2}", + "image_path": "1b8d93b28a0b52a7ea8408d1474e92753f6006374f38926ddc6dfc9956d37ae5.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 302, + 803 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 68, + 526, + 313 + ], + "blocks": [ + { + "bbox": [ + 70, + 68, + 526, + 313 + ], + "lines": [ + { + "bbox": [ + 70, + 68, + 526, + 313 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 526, + 313 + ], + "type": "image", + "image_path": "63656eb5bbdbb3adfe97f4b0f590a44d26894e6a8e5d44117cfa384530768d08.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 322, + 525, + 359 + ], + "lines": [ + { + "bbox": [ + 67, + 322, + 525, + 359 + ], + "spans": [ + { + "bbox": [ + 67, + 322, + 525, + 359 + ], + "type": "text", + "content": "Figure 8: LRMs exhibit under-accuracy and overthinking on simple problems. Shapes represent organizations, colors represent base model families, with darker colors indicating larger models, and connecting lines represent the relationships between model families and training." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 380, + 271, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 271, + 393 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 271, + 393 + ], + "type": "text", + "content": "B.4 Types and Analysis of Format Errors" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 396, + 291, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 291, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 291, + 544 + ], + "type": "text", + "content": "This section introduces a comprehensive taxonomy of format errors and highlights the importance of addressing these issues in future research. Unlike conventional LLMs, LRMs frequently exhibit format errors. These errors are defined by failing to use a unique end thinking marker (e.g.,) to separate the thinking process from the final answer. Format errors increase the difficulty of distinguishing the thinking process from the final answer and reveal the vulnerability of LRMs in following predefined formats." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 545, + 291, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 545, + 291, + 613 + ], + "spans": [ + { + "bbox": [ + 67, + 545, + 291, + 613 + ], + "type": "text", + "content": "To illustrate this phenomenon, we identify 12 distinct types of response formats produced by LRMs, each assigned a unique ID, as shown in Table 5. 
These 12 types are further grouped into three major categories:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 80, + 618, + 291, + 775 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 80, + 618, + 291, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 618, + 291, + 727 + ], + "spans": [ + { + "bbox": [ + 80, + 618, + 291, + 727 + ], + "type": "text", + "content": "- Standard-Conforming Responses: These responses meet the expected format by including exactly one end thinking marker (e.g., ) to delimit the thinking process from the final answer. Among these, type ID-100 includes a thinking process, while ID-101 omits it. The proportion of such responses is measured using the S-Corr metric." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 80, + 735, + 291, + 775 + ], + "type": "text", + "content": "- Unreadable Responses: These refer to generation failures, including cases where LRMs produce endlessly thinking content or solely" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 324, + 380, + 526, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 380, + 526, + 420 + ], + "spans": [ + { + "bbox": [ + 324, + 380, + 526, + 420 + ], + "type": "text", + "content": "produce end thinking markers. The proportion of all other (i.e., readable) responses is measured using the L-Corr metric." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 430, + 526, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 430, + 526, + 592 + ], + "spans": [ + { + "bbox": [ + 316, + 430, + 526, + 592 + ], + "type": "text", + "content": "- Readable but Malformed Responses: These responses deviate from the standard format yet still contain extractable information. 
In some cases, the final answer is missing (e.g., ID-200, ID-202, ID-205), and we instead evaluate the correctness of the thinking process. In other cases, multiple (e.g., ID-201, ID-203) or unmatched9 (e.g., ID-204, ID-206) end thinking markers are generated. In such instances, we treat the content following the last end thinking marker as the final answer for evaluation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 602, + 526, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 602, + 526, + 739 + ], + "spans": [ + { + "bbox": [ + 302, + 602, + 526, + 739 + ], + "type": "text", + "content": "Table 14 and Table 15 present the distributions of 12 format types under top-p sampling and greedy sampling, respectively. we find: (1) The infinite generation phenomenon is widespread across most LRMs, particularly concentrated in LRMs with fewer than 32B parameters. (2) The Nemotron and EXAONE families frequently produce correctly formatted responses without any explicit thinking processes. This behavior can be viewed as a mechanism for mitigating over-thinking. 
However," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "text", + "content": "9This paper provides a reference collection of unmatched end thinking makers: " + }, + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "inline_equation", + "content": "< /" + }, + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "text", + "content": " think>, " + }, + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "inline_equation", + "content": "< /" + }, + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "text", + "content": " th think>, " + }, + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "inline_equation", + "content": "< /" + }, + { + "bbox": [ + 302, + 746, + 525, + 773 + ], + "type": "text", + "content": " reason>, \\nanswer\\n, \\*\\*Final Answer\\*\\* and \\*\\*答案\\*." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 152 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 152 + ], + "type": "text", + "content": "the EXAONE family still exhibits substantial overthinking tendencies, suggesting that LRMs' capability to respond without visible reasoning and their tendency to overthink may be orthogonal characteristics. (3) None of the evaluated LRMs exhibited behaviors classified as ID-205/206." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 69, + 159, + 291, + 309 + ], + "blocks": [ + { + "bbox": [ + 69, + 159, + 291, + 309 + ], + "lines": [ + { + "bbox": [ + 69, + 159, + 291, + 309 + ], + "spans": [ + { + "bbox": [ + 69, + 159, + 291, + 309 + ], + "type": "table", + "html": "
FormatIDmarker (standard)marker (unmatched)marker (number)thinking processfinal answer
Standard100-1
101-1×
Readable but Malformed200-1×
201->1
202->1×
203->1×
204×≥1
205×≥1×
206×≥1×
207××0-
Unreadable300≥1××
301××0-
", + "image_path": "d1f06c5b686490da5727a7d2a7d29bd968a112ef4adbf0a757b669783d69a035.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 356, + 282, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 356, + 282, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 356, + 282, + 370 + ], + "type": "text", + "content": "C More Experimental Setups & Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 378, + 216, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 378, + 216, + 391 + ], + "spans": [ + { + "bbox": [ + 67, + 378, + 216, + 391 + ], + "type": "text", + "content": "C.1 Greedy Sampling Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 395, + 290, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 395, + 290, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 395, + 290, + 464 + ], + "type": "text", + "content": "Table 16 presents the performance of LRMs on S1-Bench under greedy sampling. While overall accuracy improves compared to top-p sampling, issues of inefficiency and accuracy degradation on simple questions remain." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 473, + 288, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 473, + 288, + 486 + ], + "spans": [ + { + "bbox": [ + 67, + 473, + 288, + 486 + ], + "type": "text", + "content": "C.2 Efficiency Analysis across Subcategories." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 491, + 291, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 491, + 291, + 558 + ], + "spans": [ + { + "bbox": [ + 67, + 491, + 291, + 558 + ], + "type": "text", + "content": "Figure 9 illustrates the average response tokens across the 28 subcategories. 
In the heatmap, both models (rows) and subcategories (columns) are ordered in descending order according to their average number of response tokens." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 568, + 214, + 581 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 568, + 214, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 568, + 214, + 581 + ], + "type": "text", + "content": "C.3 Solution Analysis Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 585, + 291, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 585, + 291, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 585, + 291, + 721 + ], + "type": "text", + "content": "For solution analysis, We only use well-formatted thinking processes with correct final answers, as incorrect answers make it unclear whether LRMs are over-reasoning or under-reasoning, and malformed thinking processes cannot be precisely extracted. The segmentation process is performed by DeepSeek-v3, with prompts detailed in Table 17. We compute the average token count in the first solution round; if no solution is found, we use the token count of the entire thinking process." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 730, + 246, + 743 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 730, + 246, + 743 + ], + "spans": [ + { + "bbox": [ + 67, + 730, + 246, + 743 + ], + "type": "text", + "content": "C.4 Thinking Redundancy Analysis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 749, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 749, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 749, + 290, + 775 + ], + "type": "text", + "content": "We conduct a similarity analysis to analyze how information redundancy in the thinking processes" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 71, + 526, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 248 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 248 + ], + "type": "text", + "content": "changes as reasoning sequences increase. Specifically, we first divide the complete thinking process into k equal-length segments10. Then, we encode each segment using the all-MiniLM-L6-v2 model11. For each segment, we calculate the cosine similarity with all its preceding segments and use the maximum similarity as a measure of its information redundancy. As shown in Figure 10, information redundancy increases across all four main categories as reasoning sequences increase. Sky-T1-32B shows overall lower similarity, which stems from its shorter thinking process, but still demonstrates an upward trend." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 256, + 437, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 256, + 437, + 269 + ], + "spans": [ + { + "bbox": [ + 302, + 256, + 437, + 269 + ], + "type": "text", + "content": "C.5 Error Analysis Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 273, + 526, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 273, + 526, + 408 + ], + "spans": [ + { + "bbox": [ + 302, + 273, + 526, + 408 + ], + "type": "text", + "content": "In error analysis, we only use well-formatted samples, as malformed thinking processes cannot be precisely extracted. For samples with correct final answers, we categorize them based on whether the thinking process contains explicit incorrect conclusions in intermediate steps. For samples with incorrect final answers, we categorize them based on whether the correct answer is mentioned at least once during reasoning. We use DeepSeek-v3 for categorization, with prompts provided in Table 18." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 417, + 471, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 417, + 471, + 430 + ], + "spans": [ + { + "bbox": [ + 302, + 417, + 471, + 430 + ], + "type": "text", + "content": "C.6 Gut Moment Analysis Details" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 434, + 525, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 434, + 525, + 556 + ], + "spans": [ + { + "bbox": [ + 302, + 434, + 525, + 556 + ], + "type": "text", + "content": "We prompt GPT-4o to classify the initial part of model responses (before the first '\\n\\ntypes based on its comment on difficulty: easy, neutral, difficult, and no comment. The prompts for english question can be seen in Table 19. For Chinese queries, we use the translated version of the prompt in Chinese. 
In Table 6, we show the most common sentence of all LRMs in each type of \"gut moment.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 306, + 565, + 522, + 647 + ], + "blocks": [ + { + "bbox": [ + 93, + 317, + 263, + 329 + ], + "lines": [ + { + "bbox": [ + 93, + 317, + 263, + 329 + ], + "spans": [ + { + "bbox": [ + 93, + 317, + 263, + 329 + ], + "type": "text", + "content": "Table 5: Twelve types of response format." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 565, + 522, + 647 + ], + "lines": [ + { + "bbox": [ + 306, + 565, + 522, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 565, + 522, + 647 + ], + "type": "table", + "html": "
TypeSentenceCount
easy-zh这个问题看起来挺简单的308
easy-enthat seems straightforward36
difficult-zh这个问题看起来有点复杂308
difficult-enpercentages can sometimes be tricky7
neutral-zh这个问题看起来好像不难24
neutral-enHmm, interesting3
", + "image_path": "3619fa05d9596f3dfac74b2e6f30bf27955c5389fa40972a22f2528776849588.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 655, + 525, + 680 + ], + "lines": [ + { + "bbox": [ + 302, + 655, + 525, + 680 + ], + "spans": [ + { + "bbox": [ + 302, + 655, + 525, + 680 + ], + "type": "text", + "content": "Table 6: The most common sentence in each type of \"gut moment.\"" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 303, + 703, + 389, + 715 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 703, + 389, + 715 + ], + "spans": [ + { + "bbox": [ + 303, + 703, + 389, + 715 + ], + "type": "text", + "content": "D Error Cases" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 724, + 524, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 724, + 524, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 724, + 524, + 750 + ], + "type": "text", + "content": "This section presents several error cases observed in LRMs. See Tables 20, 21, 22, 23, 24, and 25." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 754, + 524, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 754, + 524, + 775 + ], + "spans": [ + { + "bbox": [ + 311, + 754, + 524, + 775 + ], + "type": "text", + "content": "10We set " + }, + { + "bbox": [ + 311, + 754, + 524, + 775 + ], + "type": "inline_equation", + "content": "k = 15" + }, + { + "bbox": [ + 311, + 754, + 524, + 775 + ], + "type": "text", + "content": " , changing its value does not affect the conclusions. 
11 https://huggingface.co/sentence-transformers/allMiniLM-L6-v2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 291, + 792, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 302, + 803 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 184, + 523, + 623 + ], + "blocks": [ + { + "bbox": [ + 70, + 184, + 523, + 623 + ], + "lines": [ + { + "bbox": [ + 70, + 184, + 523, + 623 + ], + "spans": [ + { + "bbox": [ + 70, + 184, + 523, + 623 + ], + "type": "table", + "html": "
Paper AbbreviationMATHGSM8KAIMEGPQAAMCMMLUOlympiad-BenchSVAMPLiveCode-BenchCommon-SenseQA
Codi (Shen et al., 2025b)
CISC (Taubenfeld et al., 2025)
CoT-Valve (Ma et al., 2025b)
Dast (Shen et al., 2025a)
ATM (Chen et al., 2024a)
DEER (Yang et al., 2025b)
DPTS (Ding et al., 2025)
Dynasor (Fu et al., 2024)
ESC (Li et al., 2024)
Hawkeye (She et al., 2025)
token complexity (Lee et al., 2025)
INFTYTHINK (Yan et al., 2025)
KIMI K1.5 (Team et al., 2025a)
L1 (Aggarwal and Welleck, 2025)
LightThinker (Zhang et al., 2025a)
LS-Mixture SFT (Yu et al., 2025a)
DSC (Wang et al., 2024)
O1-Pruner (Luo et al., 2025)
MRT (Qu et al., 2025b)
Self-Doubt (Fu et al., 2025)
RASC (Wan et al., 2024)
NoThinking (Ma et al., 2025a)
Retro-Search (Lu et al., 2025)
RSD (Liao et al., 2025)
ST-BoN (Wang et al., 2025b)
Elastic Reasoning (Xu et al., 2025b)
FS-BoN (Munkhbat et al., 2025)
SoT (Aytes et al., 2025)
SpecReason (Pan et al., 2025)
Speculative Thinking (Yang et al., 2025d)
SPIRIT (Cui et al., 2025)
ITC Analysis (Wang et al., 2025a)
Think when needed (Yang et al., 2025c)
THINKPRUNE (Hou et al., 2025)
TALE (Han et al., 2024)
TokenSkip (Xia et al., 2025)
TOPS (Yang et al., 2025e)
efficient reasoning (Arora and Zanette, 2025)
TWT (Xu et al., 2025a)
Z1 (Yu et al., 2025b)
Count28242011866555
", + "image_path": "2f0be290628cb975dda0a9c4b4129748f34ac5ba492c7195d0629498b7739b93.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 630, + 525, + 655 + ], + "lines": [ + { + "bbox": [ + 67, + 630, + 525, + 655 + ], + "spans": [ + { + "bbox": [ + 67, + 630, + 525, + 655 + ], + "type": "text", + "content": "Table 7: A total of 40 studies on LRM efficiency before May 2025 were included. Benchmarks that appeared more than four times are listed." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 144, + 514, + 676 + ], + "blocks": [ + { + "bbox": [ + 77, + 144, + 514, + 676 + ], + "lines": [ + { + "bbox": [ + 77, + 144, + 514, + 676 + ], + "spans": [ + { + "bbox": [ + 77, + 144, + 514, + 676 + ], + "type": "table", + "html": "
cate.subcategoriesExplanation and cases
reasoning questionnumerical reasoningQuestions that require performing basic mathematical operations or solving simple algebraic equations to arrive at a numerical answer.\nCase: What's two plus three?
code reasoningQuestions that require tracing through and executing simple code snippets to determine their output or behavior when run in a specific programming environment.\nCase: What is the output of the following code when run in Python 3 environment: word = "hello"\\nprint(len(word))
set reasoningQuestions that require applying simple syllogistic reasoning to determine whether elements belong to sets based on clearly stated relationships.\nCase: All squares are quadrilaterals. A shape is a square, is it a quadrilateral?
temporal reasoningQuestions that require calculating time durations, ages, or future dates by applying simple arithmetic operations to temporal information.\nCase: How many minutes equal 120 seconds?
spatial reasoningQuestions that require determining relative positions, directions, or orientations of objects in space based on simple spatial relationships.\nCase: If a bird is flying above a tree, where is the tree in relation to the bird?
causal reasoningQuestions that require determining outcomes by applying simple cause-and-effect relationships based on given conditional statements.\nCase: If ferromagnetic material is placed in a magnetic field, it will become magnetized. An iron nail was placed next to a strong magnet for some time. Has the nail been magnetized?
natural law reasoningQuestions that require applying basic knowledge of physical laws and natural phenomena to predict simple observable outcomes in everyday scenarios.\nCase: Which is faster, an airplane or the propagation of light?
knowledge questiongeometry factsQuestions that require recalling simple and fundamental geometric properties about shapes, angles, and basic geometric figures.\nCase: How many angles does a trapezoid have?
geographic factsQuestions that require recalling simple factual information about locations, landmarks, political divisions, celestial bodies, and other basic geographic knowledge.\nCase: Which is the largest continent on Earth?
historical factsQuestions that require recalling basic facts about historical events.\nCase: Which country first invented paper?
biographical factsQuestions that require recalling basic facts about the identities, achievements, and characteristics of historical figures.\nCase: Who proposed the theory of universal gravitation?
measurement unitsQuestions that require recalling simple conversion relationships between standard units of measurement.\nCase: How many centimeters equal 1 meter?
scientific notationQuestions that require recalling basic scientific symbols, formulas, and standard units used in scientific communication.\nCase: What is the chemical symbol for oxygen?
creative authorshipQuestions that require recalling the creators or originators of notable artistic, literary, musical, and cultural works.\nCase: Who is the author of Hamlet?
", + "image_path": "832fc96c01a1dfa5c068c72fb411254118d8eaa8000d44b0e31f4b129b28d3d1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 99, + 684, + 492, + 697 + ], + "lines": [ + { + "bbox": [ + 99, + 684, + 492, + 697 + ], + "spans": [ + { + "bbox": [ + 99, + 684, + 492, + 697 + ], + "type": "text", + "content": "Table 8: The subcategory descriptions and cases of reasoning questions and knowledge questions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 303, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 303, + 802 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 303, + 802 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 149, + 523, + 671 + ], + "blocks": [ + { + "bbox": [ + 69, + 149, + 523, + 671 + ], + "lines": [ + { + "bbox": [ + 69, + 149, + 523, + 671 + ], + "spans": [ + { + "bbox": [ + 69, + 149, + 523, + 671 + ], + "type": "table", + "html": "
cate.subcatrgoriesExplanation and cases
instruction followingrepetition constraintsQuestions that require outputting specified characters, words, or phrases a specific number of times according to simple formatting instructions.\nCase: Output the number "7" four times, without using separators.
length constraintsQuestions that require generating outputs of a specific length or with a specific number of components based on simple counting constraints.\nCase: Output a four-digit number.
character constraintsQuestions that require generating words or numbers that conform to simple specified character patterns or formatting rules.\nCase: Output a number that begins with 8.
counting constraintsQuestions that require counting specific characters or elements within a given text or sequence.\nCase: Output the number of letter "y" in the word "yes".
transformation constraintsQuestions that require modifying text or numbers according to simple formatting or character substitution rules to produce a transformed output.\nCase: Output the word "good" with all letters capitalized directly.
sentence constraintsQuestions that require generating sentences that conform to simple specified content or structural requirements.\nCase: Give a sentence that contains the phrase "have lunch" directly.
analysis questionsentiment classificationQuestions that require determining whether simple statements express positive or negative emotions based on the tone and word choice.\nCase: Does the sentence "I hate rainy days." express a positive or negative emotion?
named entity recognitionQuestions that require identifying the correct category of named entities (such as people, places, organizations, or time expressions) within simple sentences.\nCase: In the sentence "Napoleon died in 1821", is "1821" a time or a place name?
language classificationQuestions that require identifying the language of origin for simple words or phrases based on their characteristic writing systems or common vocabulary.\nCase: Is the word "hello" English or Japanese?
topic classificationQuestions that require identifying the primary subject matter or thematic category of simple sentences based on their content and context clues.\nCase: Is the topic of the sentence "The stock market rose 2% today" finance or technology?
intent recognitionQuestions that require determining the communicative purpose behind simple utterances or statements based on their phrasing and context.\nCase: Is the intention of the sentence "I'm sorry I'm late." to apologize or to blame?
syntax classificationQuestions that require identifying the correct grammatical structure or sentence type of simple expressions based on their form, punctuation, and communicative function.\nCase: Is "Close the door!" an imperative sentence or an interrogative sentence?
grammar classificationQuestions that require identifying simple grammatical properties (like tense, voice, or polarity) of sentences based on their structure and verb forms.\nCase: Is "The apple was eaten." in active voice or passive voice?
coreference resolutionQuestions that require identifying which entity a pronoun or reference term refers to in simple sentences by tracking relationships between words in the text.\nCase: In "My computer is broken, and I need to fix it." What does "it" refer to?
", + "image_path": "1ca33c1ae16b6590af6a367ff5a0eae2b58a329182ae7f749aecd0ce9bf5cb6f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 679, + 509, + 692 + ], + "lines": [ + { + "bbox": [ + 83, + 679, + 509, + 692 + ], + "spans": [ + { + "bbox": [ + 83, + 679, + 509, + 692 + ], + "type": "text", + "content": "Table 9: The subcategory descriptions and cases of instruction following questions and analysis questions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 137, + 244, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 244, + 148 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 244, + 148 + ], + "type": "text", + "content": "Prompt for construction workflow for S1-Bench" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 151, + 158, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 151, + 158, + 162 + ], + "spans": [ + { + "bbox": [ + 69, + 151, + 158, + 162 + ], + "type": "text", + "content": "Data Generation Prompt" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 164, + 525, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 164, + 525, + 184 + ], + "spans": [ + { + "bbox": [ + 69, + 164, + 525, + 184 + ], + "type": "text", + "content": "Generate 50 pairs of questions and answers in both Chinese and English based on the category's name, definition, and specific simplicity criteria. 
The following conditions must be satisfied:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 183, + 399, + 210 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 69, + 183, + 399, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 183, + 399, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 183, + 399, + 192 + ], + "type": "text", + "content": "1. Questions must be naturally and clearly expressed, unambiguous, and free of intentional traps." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 192, + 393, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 192, + 393, + 200 + ], + "spans": [ + { + "bbox": [ + 69, + 192, + 393, + 200 + ], + "type": "text", + "content": "2. Answers must be unique or easily falsifiable, with no possibility of multiple correct answers." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 200, + 221, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 200, + 221, + 210 + ], + "spans": [ + { + "bbox": [ + 69, + 200, + 221, + 210 + ], + "type": "text", + "content": "3. Make the questions as diverse as possible." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 213, + 184, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 213, + 184, + 231 + ], + "spans": [ + { + "bbox": [ + 69, + 213, + 184, + 231 + ], + "type": "text", + "content": "Category Name and Definition: {name_and_defined}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 235, + 172, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 235, + 172, + 253 + ], + "spans": [ + { + "bbox": [ + 69, + 235, + 172, + 253 + ], + "type": "text", + "content": "Specific Simplicity Criteria: {criteria}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 257, + 140, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 257, + 140, + 301 + ], + "spans": [ + { + "bbox": [ + 69, + 257, + 140, + 301 + ], + "type": "text", + "content": "Cases: \n## English question: {question_en} \n## English Answer: {answer_en}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 304, + 141, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 304, + 141, + 339 + ], + "spans": [ + { + "bbox": [ + 69, + 304, + 141, + 339 + ], + "type": "text", + "content": "```c\n## Chinese question:\n{question_zh}\n## Chinese Answer:\n{answer_zh}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 343, + 427, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 343, + 427, + 361 + ], + "spans": [ + { + "bbox": [ + 69, + 343, + 427, + 361 + ], + "type": "text", + "content": "Please generate 50 pairs of Chinese and English questions and answers in the following format: [question]English-question[answer]English-answer[question]Chinese-question[answer]Chinese-answer..." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 364, + 127, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 364, + 127, + 375 + ], + "spans": [ + { + "bbox": [ + 69, + 364, + 127, + 375 + ], + "type": "text", + "content": "Start generating:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 379, + 180, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 379, + 180, + 389 + ], + "spans": [ + { + "bbox": [ + 69, + 379, + 180, + 389 + ], + "type": "text", + "content": "Quality Discrimination Prompt" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 392, + 496, + 437 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 69, + 392, + 496, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 392, + 496, + 410 + ], + "spans": [ + { + "bbox": [ + 69, + 392, + 496, + 410 + ], + "type": "text", + "content": "Given a question, its answer, and its category, please analyze from the following perspectives as comprehensively as possible: 1. Whether the question belongs to the specified category and meet the Specific Simplicity Criteria." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 410, + 391, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 410, + 391, + 419 + ], + "spans": [ + { + "bbox": [ + 69, + 410, + 391, + 419 + ], + "type": "text", + "content": "2. Whether the question is easy, clear, unambiguous, and has an absolutely unique answer." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 419, + 362, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 419, + 362, + 427 + ], + "spans": [ + { + "bbox": [ + 69, + 419, + 362, + 427 + ], + "type": "text", + "content": "3. Whether the answer is absolutely correct; if not, what the correct answer should be." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 427, + 489, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 427, + 489, + 437 + ], + "spans": [ + { + "bbox": [ + 69, + 427, + 489, + 437 + ], + "type": "text", + "content": "4. Whether the question is similar to other given questions, and if similar, whether more diverse questions can be generated." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 440, + 184, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 440, + 184, + 458 + ], + "spans": [ + { + "bbox": [ + 69, + 440, + 184, + 458 + ], + "type": "text", + "content": "Category Name and Definition: {name_and_defined}" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 462, + 172, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 462, + 172, + 481 + ], + "spans": [ + { + "bbox": [ + 69, + 462, + 172, + 481 + ], + "type": "text", + "content": "Specific Simplicity Criteria: {criteria}" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 484, + 154, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 484, + 154, + 502 + ], + "spans": [ + { + "bbox": [ + 69, + 484, + 154, + 502 + ], + "type": "text", + "content": "Question and Answer: {question_with_answer}" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 506, + 135, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 506, + 135, + 523 + ], + "spans": [ + { + "bbox": [ + 69, + 506, + 135, + 523 + ], + "type": "text", + "content": "Other Questions: {questions_list}" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 527, + 331, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 527, + 331, + 538 + ], + "spans": [ + { + "bbox": [ + 69, + 527, + 331, + 538 + ], + "type": "text", + "content": "Begin your analysis, aiming to be as detailed and comprehensive as 
possible:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 541, + 170, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 541, + 170, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 541, + 170, + 552 + ], + "type": "text", + "content": "Difficulty Reduction Prompt" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 555, + 524, + 670 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 69, + 555, + 524, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 555, + 524, + 574 + ], + "spans": [ + { + "bbox": [ + 69, + 555, + 524, + 574 + ], + "type": "text", + "content": "Given a question and answer that are too complex for the model to answer correctly, you need to further reduce their difficulty while trying to:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 574, + 302, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 574, + 302, + 582 + ], + "spans": [ + { + "bbox": [ + 69, + 574, + 302, + 582 + ], + "type": "text", + "content": "- Ensure the question aligns with the Category Name and Definition." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 69, + 582, + 273, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 582, + 273, + 591 + ], + "spans": [ + { + "bbox": [ + 69, + 582, + 273, + 591 + ], + "type": "text", + "content": "- Ensure the question meets the Specific Simplicity Criteria." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 69, + 595, + 183, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 595, + 183, + 613 + ], + "spans": [ + { + "bbox": [ + 69, + 595, + 183, + 613 + ], + "type": "text", + "content": "Category Name and Definition: {name_and_defined}" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 69, + 616, + 171, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 616, + 171, + 634 + ], + "spans": [ + { + "bbox": [ + 69, + 616, + 171, + 634 + ], + "type": "text", + "content": "Specific Simplicity Criteria: {criteria}" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 69, + 638, + 154, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 638, + 154, + 656 + ], + "spans": [ + { + "bbox": [ + 69, + 638, + 154, + 656 + ], + "type": "text", + "content": "Question and Answer: {question_with_answer}" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 69, + 660, + 173, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 660, + 173, + 670 + ], + "spans": [ + { + "bbox": [ + 69, + 660, + 173, + 670 + ], + "type": "text", + "content": "The new question and answer:" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 682, + 524, + 706 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 682, + 524, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 682, + 524, + 706 + ], + "type": "text", + "content": "Table 10: \"Category Name and Definition\" refers to the subcategory name and its definition, while Specific Simplicity Criteria refers to the simplicity requirements specific to the main category." 
+ } + ] + } + ], + "index": 33, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 118, + 489, + 224 + ], + "blocks": [ + { + "bbox": [ + 106, + 118, + 489, + 224 + ], + "lines": [ + { + "bbox": [ + 106, + 118, + 489, + 224 + ], + "spans": [ + { + "bbox": [ + 106, + 118, + 489, + 224 + ], + "type": "table", + "html": "
ModelModel IDURL
Qwen2.5-7BQwen2.5-7B-Instructhttps://huggingface.co/Qwen/Qwen2.5-7B-Instruct
Llama3.1-8BLlama-3.1-8B-Instructhttps://huggingface.co/meta-llama/Llama-3.1-8B-Instruct
Mistral-8BMinistral-8B-Instruct-2410https://huggingface.co/mistralai/Ministral-8B-Instruct-2410
Gemma2-9Bgemma-2-9b-ithttps://huggingface.co/google/gemma-2-9b-it
Qwen2.5-14BQwen2.5-14B-Instructhttps://huggingface.co/Qwen/Qwen2.5-14B-Instruct
Qwen2.5-32BQwen2.5-32B-Instructhttps://huggingface.co/Qwen/Qwen2.5-32B-Instruct
Qwen2.5-72BQwen2.5-72B-Instructhttps://huggingface.co/Qwen/Qwen2.5-72B-Instruct
Llama3.3-70BLlama-3.3-70B-Instructhttps://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
DeepSeek-v3DeepSeek-V3-0324https://huggingface.co/deepseek-ai/DeepSeek-V3-0324
", + "image_path": "ce7830741f441d5ce070ca571bc7c8b76adf816c5b619ffa656b9c0daef6fea3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 79, + 350, + 514, + 702 + ], + "blocks": [ + { + "bbox": [ + 96, + 232, + 495, + 244 + ], + "lines": [ + { + "bbox": [ + 96, + 232, + 495, + 244 + ], + "spans": [ + { + "bbox": [ + 96, + 232, + 495, + 244 + ], + "type": "text", + "content": "Table 11: Mapping of LLM abbreviations and IDs used in this paper, with their open-source URLs." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 350, + 514, + 702 + ], + "lines": [ + { + "bbox": [ + 79, + 350, + 514, + 702 + ], + "spans": [ + { + "bbox": [ + 79, + 350, + 514, + 702 + ], + "type": "table", + "html": "
Model IDAbbreviationBase ModelAlg.Size
DeepSeek
DeepSeek-R1-Distill-Qwen-1.5BDS-R1-1.5BQwen2.5-Math-1.5BSFT800K
DeepSeek-R1-Distill-Qwen-7BDS-R1-7BQwen2.5-Math-7BSFT800K
DeepSeek-R1-Distill-Llama-8BDS-R1-8BLlama-3.1-8BSFT800K
DeepSeek-R1-Distill-Qwen-14BDS-R1-14BQwen2.5-14BSFT800K
DeepSeek-R1-Distill-Qwen-32BDS-R1-32BQwen2.5-32BSFT800K
DeepSeek-R1-Distill-Llama-70BDS-R1-70BLlama-3.3-70B-InstructSFT800K
DeepSeek-R1DS-R1DeepSeek-V3-0324SFT&RL800K&-
Qwen
QwQ-32BQwQ-32BQwen2.5-32B--
Qwen3-235B-A22BQwen3-A22BQwen3-235B-A22B-BaseSFT&RL-&-
Qwen3-30B-A3BQwen3-A3BQwen3-30B-A3B-BaseSFT&RL-&-
Qwen3-32BQwen3-32BQwen3-32B-BaseSFT&RL-&-
Qwen3-14BQwen3-14BQwen3-14B-BaseSFT&RL-&-
Qwen3-8BQwen3-8BQwen3-8B-BaseSFT&RL-&-
Qwen3-1.7BQwen3-1.7BQwen3-1.7B-BaseSFT&RL-&-
qihoo360
Light-R1-7B-DSL-R1-7B-DSDeepSeek-R1-Distill-Qwen-7BSFT3K
Light-R1-14B-DSL-R1-14B-DSDeepSeek-R1-Distill-Qwen-14BSFT&RL3K&-
Light-R1-32B-DSL-R1-32B-DSDeepSeek-R1-Distill-Qwen-32BSFT3K
Light-R1-32BL-R1-32BQwen2.5-32B-InstructSFT&DPO73K&-
simplescaling
s1.1-7Bs1.1-7BQwen2.5-7B-InstructSFT1K
s1.1-14Bs1.1-14BQwen2.5-14B-InstructSFT1K
s1.1-32Bs1.1-32BQwen2.5-32B-InstructSFT1K
LG AI Research
EXAONE-Deep-2.4BEXAONE-2.4BEXAONE-3.5-2.4B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-7.8BEXAONE-7.8BEXAONE-3.5-7.8B-InstructSFT&DPO&RL1.6M&20K&10K
EXAONE-Deep-32BEXAONE-32BEXAONE-3.5-32B-InstructSFT&DPO&RL1.6M&20K&10K
NVIDIA
Llama-3.1-Nemotron-Nano-8B-v1Nemotron-8BLlama-3.1-8B-InstructSFT&RL-&-
Llama-3.3-Nemotron-Super-49B-v1Nemotron-49BLlama-3.3-70B-InstructSFT&RL-&-
NovaSky
Sky-T1-32B-FlashSky-T1-32BQwen2.5-32B-InstructSFT&SimPO17K&10K
", + "image_path": "1a981bd1901d5cbde725a3f1aa0862ca5bcdcf4e957d236157126d10111daeb5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 164, + 709, + 428, + 721 + ], + "lines": [ + { + "bbox": [ + 164, + 709, + 428, + 721 + ], + "spans": [ + { + "bbox": [ + 164, + 709, + 428, + 721 + ], + "type": "text", + "content": "Table 12: The open-source LRMs details evaluated for S1-Bench." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 99, + 198, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 99, + 198, + 111 + ], + "spans": [ + { + "bbox": [ + 69, + 99, + 198, + 111 + ], + "type": "text", + "content": "Prompt for Correctness Evaluation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 118, + 157, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 118, + 157, + 128 + ], + "spans": [ + { + "bbox": [ + 69, + 118, + 157, + 128 + ], + "type": "text", + "content": "Evaluation on S1-Bench" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 137, + 121, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 121, + 147 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 121, + 147 + ], + "type": "text", + "content": "**Question:**" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 149, + 107, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 149, + 107, + 161 + ], + "spans": [ + { + "bbox": [ + 69, + 149, + 107, + 161 + ], + "type": "text", + 
"content": "{question}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 170, + 141, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 170, + 141, + 179 + ], + "spans": [ + { + "bbox": [ + 69, + 170, + 141, + 179 + ], + "type": "text", + "content": "**Model Answer:**" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 183, + 127, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 183, + 127, + 193 + ], + "spans": [ + { + "bbox": [ + 69, + 183, + 127, + 193 + ], + "type": "text", + "content": "{model_answer}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 203, + 136, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 203, + 136, + 213 + ], + "spans": [ + { + "bbox": [ + 69, + 203, + 136, + 213 + ], + "type": "text", + "content": "**Ground Truth:**" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 216, + 122, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 216, + 122, + 227 + ], + "spans": [ + { + "bbox": [ + 69, + 216, + 122, + 227 + ], + "type": "text", + "content": "{groundtruth}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 237, + 282, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 237, + 282, + 247 + ], + "spans": [ + { + "bbox": [ + 69, + 237, + 282, + 247 + ], + "type": "text", + "content": "Your task is to evaluate whether the model's answer is correct." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 249, + 508, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 249, + 508, + 259 + ], + "spans": [ + { + "bbox": [ + 69, + 249, + 508, + 259 + ], + "type": "text", + "content": "An answer is considered correct as long as it contains the ground truth (regardless of how complex or detailed the description is)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 262, + 524, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 262, + 524, + 280 + ], + "spans": [ + { + "bbox": [ + 68, + 262, + 524, + 280 + ], + "type": "text", + "content": "If there are parenthetical notes after the ground truth, then there may be multiple correct answers. In this case, the given answer is just one example, and any answer that meets the requirements specified in the notes can be considered correct." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 283, + 524, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 283, + 524, + 302 + ], + "spans": [ + { + "bbox": [ + 68, + 283, + 524, + 302 + ], + "type": "text", + "content": "Additionally, some reasonably uncertain supplementary information is also considered appropriate, including more details, possibilities, and expanded discussion. You should focus more on whether the reply contains the correct answer." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 312, + 524, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 312, + 524, + 330 + ], + "spans": [ + { + "bbox": [ + 67, + 312, + 524, + 330 + ], + "type": "text", + "content": "You need to output a standard JSON, providing your explanation of the evaluation in the \"explain\" field, and giving the evaluation result in the \"result\" field, where 1 means the answer is correct and 0 means it is incorrect." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 333, + 309, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 333, + 309, + 343 + ], + "spans": [ + { + "bbox": [ + 69, + 333, + 309, + 343 + ], + "type": "text", + "content": "Your action should follow the given format: \"explain\": \"\", \"result\": 0/1" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 67, + 426, + 526, + 726 + ], + "blocks": [ + { + "bbox": [ + 204, + 357, + 387, + 368 + ], + "lines": [ + { + "bbox": [ + 204, + 357, + 387, + 368 + ], + "spans": [ + { + "bbox": [ + 204, + 357, + 387, + 368 + ], + "type": "text", + "content": "Table 13: Prompt for Correctness Evaluation." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 67, + 426, + 526, + 726 + ], + "lines": [ + { + "bbox": [ + 67, + 426, + 526, + 726 + ], + "spans": [ + { + "bbox": [ + 67, + 426, + 526, + 726 + ], + "type": "table", + "html": "
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.910.000.000.000.000.000.000.000.000.000.000.09
Qwen3-14B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-8B99.950.000.000.000.000.000.000.000.000.000.000.05
Qwen3-1.7B99.810.000.000.000.000.000.000.000.000.000.000.19
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B99.910.000.090.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B99.240.000.000.000.000.000.000.000.000.000.000.76
DS-R1-1.5B97.580.000.000.000.000.000.000.000.000.000.002.42
Sky-T1-32B95.260.000.620.090.190.000.280.000.003.030.000.52
Nemotron-49B66.0733.930.000.000.000.000.000.000.000.000.000.00
Nemotron-8B58.0626.260.000.000.000.090.000.000.0015.020.000.57
L-R1-32B95.070.000.000.000.000.000.810.000.003.030.001.09
L-R1-32B-DS99.810.000.000.000.000.000.000.000.000.000.000.19
L-R1-14B-DS99.190.000.000.000.000.000.000.000.000.000.000.81
L-R1-7B-DS99.670.000.050.050.000.000.000.000.000.000.000.24
s1.1-32B99.530.000.000.050.000.000.000.000.000.000.000.43
s1.1-14B97.390.000.000.140.000.000.240.000.000.000.002.23
s1.1-7B88.960.000.007.960.090.000.000.000.000.090.002.89
EXAONE-32B67.3932.420.000.000.000.000.000.000.000.000.000.19
EXAONE-7.8B65.8332.230.000.000.050.470.000.000.000.140.001.28
EXAONE-2.4B81.4215.830.000.090.000.050.000.000.000.050.002.56
", + "image_path": "e102964c50a709822136bdbfe994f39d60087b6ab50cc89332a3509b36706e08.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 195, + 734, + 396, + 746 + ], + "lines": [ + { + "bbox": [ + 195, + 734, + 396, + 746 + ], + "spans": [ + { + "bbox": [ + 195, + 734, + 396, + 746 + ], + "type": "text", + "content": "Table 14: Format type rates under top-p sampling." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 259, + 525, + 559 + ], + "blocks": [ + { + "bbox": [ + 68, + 259, + 525, + 559 + ], + "lines": [ + { + "bbox": [ + 68, + 259, + 525, + 559 + ], + "spans": [ + { + "bbox": [ + 68, + 259, + 525, + 559 + ], + "type": "table", + "html": "
ModelStandardReadable but MalformedUnreadable
100101200201202203204205206207300301
Qwen3-A22B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-A3B100.000.000.000.000.000.000.000.000.000.000.000.00
QwQ-32B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-32B99.760.000.000.000.000.000.000.000.000.000.000.24
Qwen3-14B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-8B100.000.000.000.000.000.000.000.000.000.000.000.00
Qwen3-1.7B99.760.000.000.000.000.000.000.000.000.000.000.24
Hunyuan-T1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-70B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-32B100.000.000.000.000.000.000.000.000.000.000.000.00
DS-R1-14B99.760.000.000.000.000.000.000.000.000.000.000.24
DS-R1-8B99.530.000.000.000.000.000.000.000.000.240.000.24
DS-R1-7B97.870.000.000.000.000.000.000.000.000.000.002.13
DS-R1-1.5B91.940.000.000.000.000.000.000.000.000.000.008.06
Sky-T1-32B99.290.000.000.000.000.000.000.000.000.470.000.24
Nemotron-49B60.9039.100.000.000.000.000.000.000.000.000.000.00
Nemotron-8B55.2126.780.000.000.000.000.000.000.0016.350.001.66
L-R1-32B85.550.240.000.240.710.240.950.000.006.642.612.84
L-R1-32B-DS99.290.000.000.000.000.000.000.000.000.000.000.71
L-R1-14B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
L-R1-7B-DS98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-32B98.820.000.000.000.000.000.000.000.000.000.001.18
s1.1-14B95.970.000.000.240.000.000.240.000.000.000.003.55
s1.1-7B87.910.000.006.640.000.000.000.000.000.000.005.45
EXAONE-32B65.8833.890.000.240.000.000.000.000.000.000.000.00
EXAONE-7.8B63.5133.650.000.000.000.240.000.000.000.240.002.37
EXAONE-2.4B78.9115.880.000.000.000.000.000.000.000.000.005.21
", + "image_path": "1f08c05ec1b6385422750fdb4fe94ea288c5aa32337b466c418dd9e331f629f3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 177, + 568, + 415, + 580 + ], + "lines": [ + { + "bbox": [ + 177, + 568, + 415, + 580 + ], + "spans": [ + { + "bbox": [ + 177, + 568, + 415, + 580 + ], + "type": "text", + "content": "Table 15: Format type rates under greedy decoding setting." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 96, + 180, + 498, + 627 + ], + "blocks": [ + { + "bbox": [ + 96, + 180, + 498, + 627 + ], + "lines": [ + { + "bbox": [ + 96, + 180, + 498, + 627 + ], + "spans": [ + { + "bbox": [ + 96, + 180, + 498, + 627 + ], + "type": "table", + "html": "
ModelSizeacc (Loose)acc (Strict)L-Corr ↑S-Corr ↑Tokens ↓
Qwen3-235B-A22B235B100.00100.00100.00100.00702.70
Qwen3-30B-A3B30B100.00100.00100.00100.00636.35
QwQ-32B32B100.00100.00100.00100.00750.41
Qwen3-32B32B99.7699.7699.7699.76673.62
Qwen3-14B14B99.7699.76100.00100.00597.06
Qwen3-8B8B99.7699.76100.00100.00649.45
Qwen3-1.7B1.7B99.5399.5399.7699.76579.01
Hunyuan-T1-100.00100.00100.00100.00541.09
DS-R1671B100.00100.00100.00100.00621.89
DS-R1-70B70B99.7699.76100.00100.00469.78
DS-R1-32B32B100.00100.00100.00100.00428.46
DS-R1-14B14B99.2999.2999.7699.76463.52
DS-R1-8B8B97.6397.3999.7699.53452.11
DS-R1-7B7B94.3194.3197.8797.87436.87
DS-R1-1.5B1.5B76.5476.5491.9491.94473.67
Sky-T1-32B32B99.5399.0599.7699.29157.12
Nemotron-49B49B99.5399.53100.00100.00337.94
Nemotron-8B8B84.6077.7398.3481.99446.62
L-R1-32B32B92.1885.7894.5585.78996.36
L-R1-32B-DS32B99.2999.2999.2999.29528.45
L-R1-14B-DS14B98.8298.8298.8298.82664.28
L-R1-7B-DS7B92.6592.6598.8298.82514.60
s1.1-32B32B98.8298.8298.8298.82983.38
s1.1-14B14B95.9795.5096.4595.97786.30
s1.1-7B7B94.3187.6894.5587.91630.52
EXAONE-32B32B97.6397.39100.0099.76746.89
EXAONE-7.8B7.8B86.7386.4997.6397.16947.92
EXAONE-2.4B2.4B72.9972.9994.7994.791394.72
", + "image_path": "82e56a71ee574ab5f5bddac61408570b4b3ccea7a66fd03d2bb855e1f7a38b1f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 635, + 525, + 661 + ], + "lines": [ + { + "bbox": [ + 67, + 635, + 525, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 635, + 525, + 661 + ], + "type": "text", + "content": "Table 16: Main results in the greedy decoding setting on the S1-Bench, sorted by model family. Bold teal marks best performance, teal second best, bold burgundy worst, and burgundy second worst." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 88, + 87, + 505, + 528 + ], + "blocks": [ + { + "bbox": [ + 88, + 87, + 505, + 528 + ], + "lines": [ + { + "bbox": [ + 88, + 87, + 505, + 528 + ], + "spans": [ + { + "bbox": [ + 88, + 87, + 505, + 528 + ], + "type": "image", + "image_path": "e791da1c4318e09e42d5cbf06c66bdd4c446799ecfda954d440191112ca62b1b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 538, + 525, + 564 + ], + "lines": [ + { + "bbox": [ + 67, + 538, + 525, + 564 + ], + "spans": [ + { + "bbox": [ + 67, + 538, + 525, + 564 + ], + "type": "text", + "content": "Figure 9: Average response token counts on the 28 subcategories, which is the average result of five generations under top-p sampling." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 83, + 602, + 511, + 734 + ], + "blocks": [ + { + "bbox": [ + 83, + 602, + 511, + 734 + ], + "lines": [ + { + "bbox": [ + 83, + 602, + 511, + 734 + ], + "spans": [ + { + "bbox": [ + 83, + 602, + 511, + 734 + ], + "type": "image", + "image_path": "78e8d2a639b51786cbf504b87b7b9eb4482dadf5ec8f3a32a8585fe87b9a4491.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 743, + 525, + 756 + ], + "lines": [ + { + "bbox": [ + 67, + 743, + 525, + 756 + ], + "spans": [ + { + "bbox": [ + 67, + 743, + 525, + 756 + ], + "type": "text", + "content": "Figure 10: Maximum similarity between each segment and all preceding segments for LRMs across four categories." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 304, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 304, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 304, + 803 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 96, + 199, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 96, + 199, + 106 + ], + "spans": [ + { + "bbox": [ + 69, + 96, + 199, + 106 + ], + "type": "text", + "content": "Prompts for Solution Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 111, + 136, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 111, + 136, + 121 + ], + "spans": [ + { + "bbox": [ + 69, + 111, + 136, + 121 + ], + "type": "text", + "content": "Task Description:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 121, + 357, + 129 + ], + "type": "text", + "angle": 0, + "lines": 
[ + { + "bbox": [ + 69, + 121, + 357, + 129 + ], + "spans": [ + { + "bbox": [ + 69, + 121, + 357, + 129 + ], + "type": "text", + "content": "Your task is to segment the given Chain of Thought according to the following rules:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 129, + 160, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 129, + 160, + 137 + ], + "spans": [ + { + "bbox": [ + 69, + 129, + 160, + 137 + ], + "type": "text", + "content": "1. Segmentation positions:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 137, + 525, + 179 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 69, + 137, + 525, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 525, + 163 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 525, + 163 + ], + "type": "text", + "content": "(1) Please identify and extract all sub-reasoning processes from the Chain of Thought that meet the following condition: They explicitly arrive at a conclusion (including cases phrased as questions, e.g., \"right?\") that is directly consistent with the Ground Truth. Reasoning processes that only indirectly support the Ground Truth or result in partially aligned conclusions should be excluded." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 163, + 524, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 163, + 524, + 179 + ], + "spans": [ + { + "bbox": [ + 69, + 163, + 524, + 179 + ], + "type": "text", + "content": "(2) After clearly reaching the conclusion consistent with the Ground Truth, insert the segmentation marker to distinctly separate each qualifying sub-reasoning process." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 179, + 145, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 179, + 145, + 188 + ], + "spans": [ + { + "bbox": [ + 69, + 179, + 145, + 188 + ], + "type": "text", + "content": "2. Output Restriction:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 188, + 524, + 222 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 69, + 188, + 420, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 188, + 420, + 196 + ], + "spans": [ + { + "bbox": [ + 69, + 188, + 420, + 196 + ], + "type": "text", + "content": "(1) You should only directly output the segmentation result without adding any additional supplements." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 196, + 524, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 196, + 524, + 222 + ], + "spans": [ + { + "bbox": [ + 69, + 196, + 524, + 222 + ], + "type": "text", + "content": "(2) Except for inserting the separator, you must not make any other modifications to the original Chain of Thought, not even minor character-level changes such as punctuation, spacing, or capitalization. In other words, after removing all separators, the output text must match the original Chain of Thought exactly." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 227, + 119, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 227, + 119, + 235 + ], + "spans": [ + { + "bbox": [ + 69, + 227, + 119, + 235 + ], + "type": "text", + "content": "Some cases:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 236, + 106, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 236, + 106, + 243 + ], + "spans": [ + { + "bbox": [ + 69, + 236, + 106, + 243 + ], + "type": "text", + "content": "Case 1:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 243, + 119, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 243, + 119, + 251 + ], + "spans": [ + { + "bbox": [ + 69, + 243, + 119, + 251 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 251, + 215, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 251, + 215, + 261 + ], + "spans": [ + { + "bbox": [ + 69, + 251, + 215, + 261 + ], + "type": "text", + "content": "What is the value of " + }, + { + "bbox": [ + 69, + 251, + 215, + 261 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 69, + 251, + 215, + 261 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 69, + 251, + 215, + 261 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 69, + 251, + 215, + 261 + ], + "type": "text", + "content": " plus 3 equals 8?" 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 264, + 134, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 264, + 134, + 273 + ], + "spans": [ + { + "bbox": [ + 69, + 264, + 134, + 273 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 274, + 75, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 274, + 75, + 282 + ], + "spans": [ + { + "bbox": [ + 69, + 274, + 75, + 282 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 287, + 148, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 287, + 148, + 296 + ], + "spans": [ + { + "bbox": [ + 69, + 287, + 148, + 296 + ], + "type": "text", + "content": "Chain of Thought:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 296, + 524, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 296, + 524, + 312 + ], + "spans": [ + { + "bbox": [ + 69, + 296, + 524, + 312 + ], + "type": "text", + "content": "Okay, so I need to figure out what x is if x plus 3 equals 8. The problem says \"x plus 3 equals 8,\" so in mathematical terms, that would be: " + }, + { + "bbox": [ + 69, + 296, + 524, + 312 + ], + "type": "inline_equation", + "content": "x + 3 = 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "text", + "content": "Alright, now I need to solve for " + }, + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "text", + "content": ". 
From what I remember in algebra, the goal is to isolate " + }, + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "text", + "content": " on one side of the equation. That means I need to get rid of that \"+\"3\" that's being added to " + }, + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 312, + 524, + 338 + ], + "type": "text", + "content": ". To do that, I should perform the opposite operation. Since it's adding 3, the opposite would be subtracting 3." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 338, + 400, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 338, + 400, + 347 + ], + "spans": [ + { + "bbox": [ + 69, + 338, + 400, + 347 + ], + "type": "text", + "content": "So, if I subtract 3 from both sides, that should keep things equal. Let me try that: " + }, + { + "bbox": [ + 69, + 338, + 400, + 347 + ], + "type": "inline_equation", + "content": "x + 3 - 3 = 8 - 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "spans": [ + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "text", + "content": "Simplifying both sides, on the left side, the " + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "inline_equation", + "content": "+3" + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "inline_equation", + "content": "-3" + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "text", + "content": " cancel each other out, leaving just " + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "text", 
+ "content": ". And on the right side, 8 minus 3 is 5. So that simplifies to: " + }, + { + "bbox": [ + 69, + 347, + 524, + 363 + ], + "type": "inline_equation", + "content": "x = 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 364, + 459, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 364, + 459, + 372 + ], + "spans": [ + { + "bbox": [ + 69, + 364, + 459, + 372 + ], + "type": "text", + "content": "Wait, let me check if that makes sense. If I substitute " + }, + { + "bbox": [ + 69, + 364, + 459, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 69, + 364, + 459, + 372 + ], + "type": "text", + "content": " back into the original equation, does it hold true? Let's see:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 372, + 166, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 372, + 166, + 380 + ], + "spans": [ + { + "bbox": [ + 69, + 372, + 166, + 380 + ], + "type": "text", + "content": "Original equation: " + }, + { + "bbox": [ + 69, + 372, + 166, + 380 + ], + "type": "inline_equation", + "content": "x + 3 = 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 380, + 161, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 380, + 161, + 388 + ], + "spans": [ + { + "bbox": [ + 69, + 380, + 161, + 388 + ], + "type": "text", + "content": "Substitute " + }, + { + "bbox": [ + 69, + 380, + 161, + 388 + ], + "type": "inline_equation", + "content": "x = 5: 5 + 3 = 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 389, + 201, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 389, + 201, + 396 + ], + "spans": [ + { + "bbox": [ + 69, + 389, + 201, + 396 + ], + "type": "text", + "content": "5 plus 3 is indeed 8, so that works out." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 397, + 524, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 397, + 524, + 414 + ], + "spans": [ + { + "bbox": [ + 69, + 397, + 524, + 414 + ], + "type": "text", + "content": "Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. So that's consistent with the first method." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 69, + 414, + 524, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 414, + 524, + 431 + ], + "spans": [ + { + "bbox": [ + 69, + 414, + 524, + 431 + ], + "type": "text", + "content": "Wait a second, what if the original problem was written differently? The problem says \"x plus 3 equals 8.\" In symbols, that's unambiguous: " + }, + { + "bbox": [ + 69, + 414, + 524, + 431 + ], + "type": "inline_equation", + "content": "x + 3 = 8" + }, + { + "bbox": [ + 69, + 414, + 524, + 431 + ], + "type": "text", + "content": ". So no, I think that's right." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 69, + 431, + 133, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 431, + 133, + 439 + ], + "spans": [ + { + "bbox": [ + 69, + 431, + 133, + 439 + ], + "type": "text", + "content": "**Final Answer**" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 69, + 440, + 142, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 440, + 142, + 453 + ], + "spans": [ + { + "bbox": [ + 69, + 440, + 142, + 453 + ], + "type": "text", + "content": "The value of " + }, + { + "bbox": [ + 69, + 440, + 142, + 453 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 440, + 142, + 453 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 69, + 440, + 142, + 453 + ], + "type": "inline_equation", + "content": "\\boxed{5}" + }, + { + "bbox": [ + 69, + 440, + 142, + 453 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 69, + 456, + 111, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 456, + 111, + 465 + ], + "spans": [ + { + "bbox": [ + 69, + 456, + 111, + 465 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 69, + 465, + 524, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 465, + 524, + 481 + ], + "spans": [ + { + "bbox": [ + 69, + 465, + 524, + 481 + ], + "type": "text", + "content": "Okay, so I need to figure out what x is if x plus 3 equals 8. 
The problem says \"x plus 3 equals 8,\" so in mathematical terms, that would be: " + }, + { + "bbox": [ + 69, + 465, + 524, + 481 + ], + "type": "inline_equation", + "content": "x + 3 = 8" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "spans": [ + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "text", + "content": "Alright, now I need to solve for " + }, + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "text", + "content": ". From what I remember in algebra, the goal is to isolate " + }, + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "text", + "content": " on one side of the equation. That means I need to get rid of that \"+\"3\" that's being added to " + }, + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 69, + 481, + 524, + 507 + ], + "type": "text", + "content": ". To do that, I should perform the opposite operation. Since it's adding 3, the opposite would be subtracting 3." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 69, + 507, + 400, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 507, + 400, + 515 + ], + "spans": [ + { + "bbox": [ + 69, + 507, + 400, + 515 + ], + "type": "text", + "content": "So, if I subtract 3 from both sides, that should keep things equal. 
Let me try that: " + }, + { + "bbox": [ + 69, + 507, + 400, + 515 + ], + "type": "inline_equation", + "content": "x + 3 - 3 = 8 - 3" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "spans": [ + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "type": "text", + "content": "Simplifying both sides, on the left side, the " + }, + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "type": "inline_equation", + "content": "+3" + }, + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "type": "inline_equation", + "content": "-3" + }, + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "type": "text", + "content": " cancel each other out, leaving just x. And on the right side, 8 minus 3 is 5. So that simplifies to: " + }, + { + "bbox": [ + 69, + 515, + 524, + 533 + ], + "type": "inline_equation", + "content": "x = 5 < \\text{split}>" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 69, + 533, + 459, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 533, + 459, + 541 + ], + "spans": [ + { + "bbox": [ + 69, + 533, + 459, + 541 + ], + "type": "text", + "content": "Wait, let me check if that makes sense. If I substitute " + }, + { + "bbox": [ + 69, + 533, + 459, + 541 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 69, + 533, + 459, + 541 + ], + "type": "text", + "content": " back into the original equation, does it hold true? 
Let's see:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 69, + 541, + 166, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 541, + 166, + 550 + ], + "spans": [ + { + "bbox": [ + 69, + 541, + 166, + 550 + ], + "type": "text", + "content": "Original equation: " + }, + { + "bbox": [ + 69, + 541, + 166, + 550 + ], + "type": "inline_equation", + "content": "x + 3 = 8" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 69, + 550, + 160, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 550, + 160, + 557 + ], + "spans": [ + { + "bbox": [ + 69, + 550, + 160, + 557 + ], + "type": "text", + "content": "Substitute " + }, + { + "bbox": [ + 69, + 550, + 160, + 557 + ], + "type": "inline_equation", + "content": "x = 5: 5 + 3 = 8" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 69, + 558, + 226, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 558, + 226, + 566 + ], + "spans": [ + { + "bbox": [ + 69, + 558, + 226, + 566 + ], + "type": "text", + "content": "5 plus 3 is indeed 8, so that works out." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 69, + 566, + 524, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 566, + 524, + 583 + ], + "spans": [ + { + "bbox": [ + 69, + 566, + 524, + 583 + ], + "type": "text", + "content": "Another way might be to consider what number, when you add 3 to it, gives 8. So starting from 8, if I take away 3, I should get back to the original number. That's the same as subtracting 3 from 8, which again gives 5. So that's consistent with the first method." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 69, + 583, + 524, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 583, + 524, + 600 + ], + "spans": [ + { + "bbox": [ + 69, + 583, + 524, + 600 + ], + "type": "text", + "content": "Wait a second, what if the original problem was written differently? 
The problem says \"x plus 3 equals 8.\" In symbols, that's unambiguous: " + }, + { + "bbox": [ + 69, + 583, + 524, + 600 + ], + "type": "inline_equation", + "content": "x + 3 = 8" + }, + { + "bbox": [ + 69, + 583, + 524, + 600 + ], + "type": "text", + "content": ". So no, I think that's right." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 69, + 600, + 133, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 600, + 133, + 608 + ], + "spans": [ + { + "bbox": [ + 69, + 600, + 133, + 608 + ], + "type": "text", + "content": "**Final Answer**" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 69, + 608, + 168, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 608, + 168, + 622 + ], + "spans": [ + { + "bbox": [ + 69, + 608, + 168, + 622 + ], + "type": "text", + "content": "The value of x is 5. " + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 69, + 625, + 178, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 625, + 178, + 634 + ], + "spans": [ + { + "bbox": [ + 69, + 625, + 178, + 634 + ], + "type": "text", + "content": "...Other examples are omitted.)" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 69, + 639, + 117, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 639, + 117, + 647 + ], + "spans": [ + { + "bbox": [ + 69, + 639, + 117, + 647 + ], + "type": "text", + "content": "Eval Target:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 69, + 647, + 114, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 647, + 114, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 647, + 114, + 655 + ], + "type": "text", + "content": "## Question:" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 69, + 656, + 107, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 656, + 107, + 666 + ], + "spans": [ + { + "bbox": [ + 69, + 656, + 107, + 666 + ], + "type": "text", + "content": 
"{question}" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 69, + 669, + 130, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 669, + 130, + 677 + ], + "spans": [ + { + "bbox": [ + 69, + 669, + 130, + 677 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 69, + 677, + 122, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 677, + 122, + 687 + ], + "spans": [ + { + "bbox": [ + 69, + 677, + 122, + 687 + ], + "type": "text", + "content": "{groundtruth}" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 69, + 691, + 143, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 691, + 143, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 691, + 143, + 700 + ], + "type": "text", + "content": "Chain of Thought:" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 69, + 700, + 135, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 700, + 135, + 709 + ], + "spans": [ + { + "bbox": [ + 69, + 700, + 135, + 709 + ], + "type": "text", + "content": "{thinking_process}" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 69, + 713, + 107, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 713, + 107, + 722 + ], + "spans": [ + { + "bbox": [ + 69, + 713, + 107, + 722 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 203, + 735, + 389, + 746 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 735, + 389, + 746 + ], + "spans": [ + { + "bbox": [ + 203, + 735, + 389, + 746 + ], + "type": "text", + "content": "Table 17: Prompts for Solution Segmentation." 
+ } + ] + } + ], + "index": 52, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 53 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 167, + 170, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 167, + 170, + 178 + ], + "spans": [ + { + "bbox": [ + 69, + 167, + 170, + 178 + ], + "type": "text", + "content": "Prompts for Error Analysis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 181, + 246, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 181, + 246, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 181, + 246, + 192 + ], + "type": "text", + "content": "Prompts for samples whose final answer is correct" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 195, + 137, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 195, + 137, + 206 + ], + "spans": [ + { + "bbox": [ + 69, + 195, + 137, + 206 + ], + "type": "text", + "content": "Task Description:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 205, + 515, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 205, + 515, + 223 + ], + "spans": [ + { + "bbox": [ + 69, + 205, + 515, + 223 + ], + "type": "text", + "content": "You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 226, + 116, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 226, + 116, + 236 + ], + "spans": [ + { + "bbox": [ + 69, + 226, + 116, + 236 + ], + "type": "text", + "content": "Categories:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 236, + 376, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 236, + 376, + 244 + ], + "spans": [ + { + "bbox": [ + 69, + 236, + 376, + 244 + ], + "type": "text", + "content": "1: The CoT ***includes explicit incorrect conclusions*** in intermediate reasoning steps." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 244, + 411, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 244, + 411, + 253 + ], + "spans": [ + { + "bbox": [ + 69, + 244, + 411, + 253 + ], + "type": "text", + "content": "2: The CoT ***doesn't include any explicit incorrect conclusion*** in intermediate reasoning steps." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 256, + 232, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 256, + 232, + 267 + ], + "spans": [ + { + "bbox": [ + 69, + 256, + 232, + 267 + ], + "type": "text", + "content": "Output your evaluation in the following format:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 269, + 117, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 269, + 117, + 279 + ], + "spans": [ + { + "bbox": [ + 69, + 269, + 117, + 279 + ], + "type": "text", + "content": "TheReason:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 279, + 438, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 279, + 438, + 287 + ], + "spans": [ + { + "bbox": [ + 69, + 279, + 438, + 287 + ], + "type": "text", + "content": "[note: Conduct a step-by-step analysis, stating if and where explicit incorrect conclusions occur in the COT.]" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 290, + 113, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 290, + 113, + 300 + ], + "spans": [ + { + "bbox": [ + 69, + 290, + 113, + 300 + ], + "type": "text", + "content": "ErrorType:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 300, + 525, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 300, + 525, + 334 + ], + "spans": [ + { + "bbox": [ + 69, + 300, + 525, + 334 + ], + "type": "text", + "content": "[note: Summarize each incorrect conclusion into a specific error type using a phrase of less than 5 words, such as factual inaccuracies, logical fallacies, comprehension mistakes, calculation errors, formatting issues, and so forth, to better conduct further evaluation and analysis. Directly output a Python list, where each element represents the error type of a specific incorrect conclusion in the CoT. 
If there are no incorrect conclusions, return an empty list.]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 337, + 123, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 337, + 123, + 347 + ], + "spans": [ + { + "bbox": [ + 69, + 337, + 123, + 347 + ], + "type": "text", + "content": "TheCategory:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 345, + 488, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 345, + 488, + 355 + ], + "spans": [ + { + "bbox": [ + 69, + 345, + 488, + 355 + ], + "type": "text", + "content": "[note: Provide your classification based on your analysis using only the number \"1\" or \"2\". Do not add any additional text.]" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 358, + 110, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 358, + 110, + 367 + ], + "spans": [ + { + "bbox": [ + 69, + 358, + 110, + 367 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 367, + 107, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 367, + 107, + 377 + ], + "spans": [ + { + "bbox": [ + 69, + 367, + 107, + 377 + ], + "type": "text", + "content": "{question}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 380, + 126, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 380, + 126, + 388 + ], + "spans": [ + { + "bbox": [ + 69, + 380, + 126, + 388 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 389, + 119, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 389, + 119, + 399 + ], + "spans": [ + { + "bbox": [ + 69, + 389, + 119, + 399 + ], + "type": "text", + "content": "{groundtruth}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 401, + 95, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 69, + 401, + 95, + 410 + ], + "spans": [ + { + "bbox": [ + 69, + 401, + 95, + 410 + ], + "type": "text", + "content": "COT:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 410, + 135, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 410, + 135, + 420 + ], + "spans": [ + { + "bbox": [ + 69, + 410, + 135, + 420 + ], + "type": "text", + "content": "{thinking_process}" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 423, + 117, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 423, + 117, + 433 + ], + "spans": [ + { + "bbox": [ + 69, + 423, + 117, + 433 + ], + "type": "text", + "content": "TheReason:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 438, + 252, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 438, + 252, + 448 + ], + "spans": [ + { + "bbox": [ + 69, + 438, + 252, + 448 + ], + "type": "text", + "content": "Prompts for samples whose final answer is incorrect" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 451, + 137, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 451, + 137, + 461 + ], + "spans": [ + { + "bbox": [ + 69, + 451, + 137, + 461 + ], + "type": "text", + "content": "Task Description:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 461, + 515, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 461, + 515, + 478 + ], + "spans": [ + { + "bbox": [ + 69, + 461, + 515, + 478 + ], + "type": "text", + "content": "You will receive a Question, its corresponding Ground Truth, and a Chain of Thought(COT) generated by a LLM for that Question. Your task is to carefully analyze the CoT and assign it to one of the two predefined categories listed below." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 482, + 116, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 482, + 116, + 491 + ], + "spans": [ + { + "bbox": [ + 69, + 482, + 116, + 491 + ], + "type": "text", + "content": "Categories:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 491, + 524, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 491, + 524, + 508 + ], + "spans": [ + { + "bbox": [ + 69, + 491, + 524, + 508 + ], + "type": "text", + "content": "1: Regardless of whether the CoT ultimately arrives at the correct final answer or not, ***the correct answer is explicitly mentioned at least once*** within the reasoning steps (even if it is not ultimately adopted)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 508, + 451, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 508, + 451, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 508, + 451, + 518 + ], + "type": "text", + "content": "2: ***The correct answer is never explicitly mentioned or referenced*** at any point within the reasoning steps." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 69, + 520, + 232, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 520, + 232, + 531 + ], + "spans": [ + { + "bbox": [ + 69, + 520, + 232, + 531 + ], + "type": "text", + "content": "Output your evaluation in the following format:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 69, + 534, + 117, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 534, + 117, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 534, + 117, + 544 + ], + "type": "text", + "content": "TheReason:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 69, + 543, + 524, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 543, + 524, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 543, + 524, + 552 + ], + "type": "text", + "content": "[note: Conduct a step-by-step analysis, explicitly stating whether and where a correct answer is mentioned within the reasoning steps.]" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 69, + 555, + 123, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 555, + 123, + 565 + ], + "spans": [ + { + "bbox": [ + 69, + 555, + 123, + 565 + ], + "type": "text", + "content": "TheCategory:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 69, + 564, + 488, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 564, + 488, + 573 + ], + "spans": [ + { + "bbox": [ + 69, + 564, + 488, + 573 + ], + "type": "text", + "content": "[note: Provide your classification based on your analysis using only the number \"1\" or \"2\". 
Do not add any additional text.]" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 69, + 576, + 110, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 576, + 110, + 586 + ], + "spans": [ + { + "bbox": [ + 69, + 576, + 110, + 586 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 69, + 586, + 107, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 586, + 107, + 595 + ], + "spans": [ + { + "bbox": [ + 69, + 586, + 107, + 595 + ], + "type": "text", + "content": "{question}" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 69, + 598, + 126, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 598, + 126, + 607 + ], + "spans": [ + { + "bbox": [ + 69, + 598, + 126, + 607 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 69, + 607, + 101, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 607, + 101, + 616 + ], + "spans": [ + { + "bbox": [ + 69, + 607, + 101, + 616 + ], + "type": "text", + "content": "{answer}" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 69, + 619, + 95, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 619, + 95, + 628 + ], + "spans": [ + { + "bbox": [ + 69, + 619, + 95, + 628 + ], + "type": "text", + "content": "COT:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 69, + 629, + 123, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 629, + 123, + 638 + ], + "spans": [ + { + "bbox": [ + 69, + 629, + 123, + 638 + ], + "type": "text", + "content": "{thinking_part}" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 69, + 641, + 117, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 641, + 117, + 650 + ], + "spans": [ + { + "bbox": [ + 69, + 641, + 117, + 650 + ], + "type": "text", + "content": "TheReason:" + } + ] + } + ], 
+ "index": 38 + }, + { + "bbox": [ + 220, + 664, + 373, + 677 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 664, + 373, + 677 + ], + "spans": [ + { + "bbox": [ + 220, + 664, + 373, + 677 + ], + "type": "text", + "content": "Table 18: Prompts for Error Analysis." + } + ] + } + ], + "index": 39, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 40 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 79, + 289, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 79, + 289, + 89 + ], + "spans": [ + { + "bbox": [ + 69, + 79, + 289, + 89 + ], + "type": "text", + "content": "Prompts for classify the \"gut moment\" for English questions" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 93, + 162, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 93, + 162, + 102 + ], + "spans": [ + { + "bbox": [ + 69, + 93, + 162, + 102 + ], + "type": "text", + "content": "Overall Task Description" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 102, + 524, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 102, + 524, + 119 + ], + "spans": [ + { + "bbox": [ + 69, + 102, + 524, + 119 + ], + "type": "text", + "content": "You will be given the beginning portion of a response written by a large language model when answering a question. Your task is to classify the response into one of the following four categories based on the initial comment about **the difficulty of the question**." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 119, + 524, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 119, + 524, + 136 + ], + "spans": [ + { + "bbox": [ + 69, + 119, + 524, + 136 + ], + "type": "text", + "content": "Important: Only consider the " + }, + { + "bbox": [ + 69, + 119, + 524, + 136 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 69, + 119, + 524, + 136 + ], + "type": "text", + "content": " initial comment " + }, + { + "bbox": [ + 69, + 119, + 524, + 136 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 69, + 119, + 524, + 136 + ], + "type": "text", + "content": " on difficulty made in the response. If the model later changes its assessment, please ignore those later revisions—focus solely on the first difficulty-related comment." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 136, + 319, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 136, + 319, + 144 + ], + "spans": [ + { + "bbox": [ + 69, + 136, + 319, + 144 + ], + "type": "text", + "content": "You must assign the response to exactly one of the four categories below:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 144, + 525, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 144, + 525, + 161 + ], + "spans": [ + { + "bbox": [ + 69, + 144, + 525, + 161 + ], + "type": "text", + "content": "Category 1: Initial comment indicates the question is easy. Initial comment includes phrases like \"simple,\" \"basic,\" \"straightforward,\" \"common,\" etc., clearly stating the question is easy." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 161, + 524, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 161, + 524, + 178 + ], + "spans": [ + { + "bbox": [ + 69, + 161, + 524, + 178 + ], + "type": "text", + "content": "Category 2: Initial comment indicates the question is difficult: Initial comment includes phrases like \"a bit complex,\" \"somewhat difficult,\" \"challenging,\" \"tricky,\" etc., clearly stating the question is hard." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 178, + 525, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 178, + 525, + 195 + ], + "spans": [ + { + "bbox": [ + 69, + 178, + 525, + 195 + ], + "type": "text", + "content": "Category 3: Initial comment is neutral or suggests moderate difficulty: Initial comment includes phrases like \"interesting,\" \"not hard,\" \"not complex,\" \"fun,\" etc., without a clear indication of difficulty level." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 195, + 384, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 195, + 384, + 204 + ], + "spans": [ + { + "bbox": [ + 69, + 195, + 384, + 204 + ], + "type": "text", + "content": "Category 4: No comment on difficulty: The response contains no mention of difficulty at all." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 208, + 238, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 208, + 238, + 216 + ], + "spans": [ + { + "bbox": [ + 69, + 208, + 238, + 216 + ], + "type": "text", + "content": "Please output your result in the following format:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 216, + 443, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 216, + 443, + 226 + ], + "spans": [ + { + "bbox": [ + 69, + 216, + 443, + 226 + ], + "type": "text", + "content": "[Explanation of classification][Category (just the number)][Short phrase containing the comment]" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 226, + 429, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 226, + 429, + 234 + ], + "spans": [ + { + "bbox": [ + 69, + 226, + 429, + 234 + ], + "type": "text", + "content": "If it falls into Category 4 (no comment), write \"None\" for the [Short phrase containing the comment] part." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 238, + 245, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 238, + 245, + 248 + ], + "spans": [ + { + "bbox": [ + 69, + 238, + 245, + 248 + ], + "type": "text", + "content": "Examples for each category will be shown below." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 251, + 156, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 251, + 156, + 261 + ], + "spans": [ + { + "bbox": [ + 69, + 251, + 156, + 261 + ], + "type": "text", + "content": "## Category 1 Examples:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 265, + 103, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 265, + 103, + 273 + ], + "spans": [ + { + "bbox": [ + 69, + 265, + 103, + 273 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 274, + 151, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 274, + 151, + 283 + ], + "spans": [ + { + "bbox": [ + 69, + 274, + 151, + 283 + ], + "type": "text", + "content": "What is two plus three?" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 287, + 105, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 287, + 105, + 295 + ], + "spans": [ + { + "bbox": [ + 69, + 287, + 105, + 295 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 296, + 524, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 296, + 524, + 322 + ], + "spans": [ + { + "bbox": [ + 69, + 296, + 524, + 322 + ], + "type": "text", + "content": "Hmm, the user is asking what two plus three equals. This question seems very simple, though it might be a bit confusing for someone just starting to learn math. First, I need to figure out what the user's intent is. They might be testing my basic calculation ability or genuinely need help solving the problem." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 326, + 96, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 326, + 96, + 334 + ], + "spans": [ + { + "bbox": [ + 69, + 326, + 96, + 334 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 334, + 430, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 334, + 430, + 344 + ], + "spans": [ + { + "bbox": [ + 69, + 334, + 430, + 344 + ], + "type": "text", + "content": "The initial comment describes the question as \"very simple\"1This question seems very simple" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 348, + 178, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 348, + 178, + 356 + ], + "spans": [ + { + "bbox": [ + 69, + 348, + 178, + 356 + ], + "type": "text", + "content": "...(Other examples are omitted.)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 360, + 156, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 360, + 156, + 370 + ], + "spans": [ + { + "bbox": [ + 69, + 360, + 156, + 370 + ], + "type": "text", + "content": "## Category 2 Examples:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 375, + 103, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 375, + 103, + 382 + ], + "spans": [ + { + "bbox": [ + 69, + 375, + 103, + 382 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 383, + 230, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 383, + 230, + 391 + ], + "spans": [ + { + "bbox": [ + 69, + 383, + 230, + 391 + ], + "type": "text", + "content": "What's the next floor after basement level two?" 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 396, + 105, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 396, + 105, + 405 + ], + "spans": [ + { + "bbox": [ + 69, + 396, + 105, + 405 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 405, + 524, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 405, + 524, + 431 + ], + "spans": [ + { + "bbox": [ + 69, + 405, + 524, + 431 + ], + "type": "text", + "content": "Hmm, this question seems a bit complex, but I'll try to understand it first. The question is \"What's the next floor after basement level two?\" First, I need to clarify what \"basement level two\" means. Usually, the first basement is the deepest, and then going up you have ground floor, first floor, and so on." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 435, + 96, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 435, + 96, + 444 + ], + "spans": [ + { + "bbox": [ + 69, + 435, + 96, + 444 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 69, + 444, + 386, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 444, + 386, + 453 + ], + "spans": [ + { + "bbox": [ + 69, + 444, + 386, + 453 + ], + "type": "text", + "content": "The initial comment says it's \"a bit complex\" " + }, + { + "bbox": [ + 69, + 444, + 386, + 453 + ], + "type": "inline_equation", + "content": "<\\text{sep}>2" + }, + { + "bbox": [ + 69, + 444, + 386, + 453 + ], + "type": "inline_equation", + "content": "<\\text{sep}>" + }, + { + "bbox": [ + 69, + 444, + 386, + 453 + ], + "type": "text", + "content": " This question seems a bit complex" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 69, + 456, + 178, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 456, + 178, + 465 + ], + "spans": [ + { + "bbox": [ + 
69, + 456, + 178, + 465 + ], + "type": "text", + "content": "...Other examples are omitted.)" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 69, + 470, + 156, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 470, + 156, + 479 + ], + "spans": [ + { + "bbox": [ + 69, + 470, + 156, + 479 + ], + "type": "text", + "content": "## Category 3 Examples:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 69, + 480, + 103, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 480, + 103, + 486 + ], + "spans": [ + { + "bbox": [ + 69, + 480, + 103, + 486 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 69, + 486, + 446, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 486, + 446, + 496 + ], + "spans": [ + { + "bbox": [ + 69, + 486, + 446, + 496 + ], + "type": "text", + "content": "\"Doctors recommend drinking at least eight glasses of water a day.\" Is this sentence about health or education?" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 69, + 500, + 105, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 500, + 105, + 508 + ], + "spans": [ + { + "bbox": [ + 69, + 500, + 105, + 508 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 69, + 508, + 524, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 508, + 524, + 526 + ], + "spans": [ + { + "bbox": [ + 69, + 508, + 524, + 526 + ], + "type": "text", + "content": "Hmm, this question seems not complicated, though if you think carefully it might be a bit tricky. 
First, I need to understand the meaning of the sentence \"Doctors recommend drinking at least eight glasses of water a day.\"" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 69, + 530, + 96, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 530, + 96, + 538 + ], + "spans": [ + { + "bbox": [ + 69, + 530, + 96, + 538 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 69, + 539, + 524, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 539, + 524, + 556 + ], + "spans": [ + { + "bbox": [ + 69, + 539, + 524, + 556 + ], + "type": "text", + "content": "The initial comment says it's \"not complicated,\" even though it later mentions it might be tricky. Classification should follow the initial comment3This question doesn't seem complicated" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 69, + 560, + 178, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 560, + 178, + 569 + ], + "spans": [ + { + "bbox": [ + 69, + 560, + 178, + 569 + ], + "type": "text", + "content": "...Other examples are omitted.)" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 69, + 573, + 156, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 573, + 156, + 582 + ], + "spans": [ + { + "bbox": [ + 69, + 573, + 156, + 582 + ], + "type": "text", + "content": "## Category 4 Examples:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 69, + 587, + 103, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 587, + 103, + 595 + ], + "spans": [ + { + "bbox": [ + 69, + 587, + 103, + 595 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 69, + 596, + 324, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 596, + 324, + 605 + ], + "spans": [ + { + "bbox": [ + 69, + 596, + 324, + 605 + ], + "type": "text", + "content": "All 
birds have feathers. A sparrow is a bird. Does a sparrow have feathers?" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 69, + 609, + 105, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 609, + 105, + 617 + ], + "spans": [ + { + "bbox": [ + 69, + 609, + 105, + 617 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 69, + 618, + 524, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 618, + 524, + 635 + ], + "spans": [ + { + "bbox": [ + 69, + 618, + 524, + 635 + ], + "type": "text", + "content": "Hmm, the user is asking a question about birds and feathers—specifically, that all birds have feathers, a sparrow is a bird, so does the sparrow have feathers? This looks like a logic reasoning question. The user might be learning the basics of logic or biology." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 69, + 640, + 96, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 640, + 96, + 647 + ], + "spans": [ + { + "bbox": [ + 69, + 640, + 96, + 647 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 69, + 648, + 483, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 648, + 483, + 657 + ], + "spans": [ + { + "bbox": [ + 69, + 648, + 483, + 657 + ], + "type": "text", + "content": "Although the response mentions it's a \"logic reasoning question,\" it contains no evaluation of difficulty4None" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 69, + 661, + 178, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 661, + 178, + 670 + ], + "spans": [ + { + "bbox": [ + 69, + 661, + 178, + 670 + ], + "type": "text", + "content": "...Other examples are omitted.)" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 69, + 674, + 199, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 674, + 199, 
+ 683 + ], + "spans": [ + { + "bbox": [ + 69, + 674, + 199, + 683 + ], + "type": "text", + "content": "Here is the one you need to classify." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 69, + 687, + 103, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 687, + 103, + 695 + ], + "spans": [ + { + "bbox": [ + 69, + 687, + 103, + 695 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 69, + 697, + 99, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 697, + 99, + 705 + ], + "spans": [ + { + "bbox": [ + 69, + 697, + 99, + 705 + ], + "type": "text", + "content": "question" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 69, + 709, + 105, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 709, + 105, + 717 + ], + "spans": [ + { + "bbox": [ + 69, + 709, + 105, + 717 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 69, + 719, + 99, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 719, + 99, + 727 + ], + "spans": [ + { + "bbox": [ + 69, + 719, + 99, + 727 + ], + "type": "text", + "content": "response" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 69, + 731, + 96, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 731, + 96, + 740 + ], + "spans": [ + { + "bbox": [ + 69, + 731, + 96, + 740 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 149, + 753, + 443, + 765 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 753, + 443, + 765 + ], + "spans": [ + { + "bbox": [ + 149, + 753, + 443, + 765 + ], + "type": "text", + "content": "Table 19: Prompts for classifying the \"gut moment\" in English questions." 
+ } + ] + } + ], + "index": 51, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 793, + 302, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 793, + 302, + 802 + ], + "spans": [ + { + "bbox": [ + 290, + 793, + 302, + 802 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 52 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 304, + 336, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 304, + 336, + 316 + ], + "spans": [ + { + "bbox": [ + 69, + 304, + 336, + 316 + ], + "type": "text", + "content": "Categories: instruction following Model: DeepSeek-R1-Distill-Qwen-7B" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 324, + 105, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 324, + 105, + 333 + ], + "spans": [ + { + "bbox": [ + 69, + 324, + 105, + 333 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 336, + 273, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 336, + 273, + 346 + ], + "spans": [ + { + "bbox": [ + 69, + 336, + 273, + 346 + ], + "type": "text", + "content": "Output the number \"7\" four times, without using separators." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 348, + 124, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 348, + 124, + 357 + ], + "spans": [ + { + "bbox": [ + 69, + 348, + 124, + 357 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 360, + 88, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 360, + 88, + 369 + ], + "spans": [ + { + "bbox": [ + 69, + 360, + 88, + 369 + ], + "type": "text", + "content": "7777" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 372, + 134, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 372, + 134, + 382 + ], + "spans": [ + { + "bbox": [ + 69, + 372, + 134, + 382 + ], + "type": "text", + "content": "Thinking process:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 384, + 524, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 384, + 524, + 402 + ], + "spans": [ + { + "bbox": [ + 69, + 384, + 524, + 402 + ], + "type": "text", + "content": "To solve the problem of outputting the number \"7\" four times without using separators, I need to find a way to repeat the digit 7 multiple times without relying on concatenation or other connecting characters." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 402, + 524, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 402, + 524, + 419 + ], + "spans": [ + { + "bbox": [ + 69, + 402, + 524, + 419 + ], + "type": "text", + "content": "One approach is to use mathematical operations that inherently produce the number 7. Addition is a simple and effective operation that can be used to achieve this. By adding 7 to itself multiple times, I can create the desired sequence of numbers." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 419, + 524, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 419, + 524, + 436 + ], + "spans": [ + { + "bbox": [ + 69, + 419, + 524, + 436 + ], + "type": "text", + "content": "Specifically, adding 7 four times will give me " + }, + { + "bbox": [ + 69, + 419, + 524, + 436 + ], + "type": "inline_equation", + "content": "7 + 7 + 7 + 7" + }, + { + "bbox": [ + 69, + 419, + 524, + 436 + ], + "type": "text", + "content": ", which equals 28. This method ensures that the number 7 is used exactly four times without the need for any separators." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 436, + 442, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 436, + 442, + 445 + ], + "spans": [ + { + "bbox": [ + 69, + 436, + 442, + 445 + ], + "type": "text", + "content": "Therefore, the solution is to use the addition operation to repeat the number 7 four times and output the result." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 447, + 120, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 447, + 120, + 456 + ], + "spans": [ + { + "bbox": [ + 69, + 447, + 120, + 456 + ], + "type": "text", + "content": "Final Answer:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 460, + 524, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 460, + 524, + 477 + ], + "spans": [ + { + "bbox": [ + 69, + 460, + 524, + 477 + ], + "type": "text", + "content": "To output the number \"7\" four times without using separators, you can utilize mathematical operations that inherently produce the number 7 multiple times. 
One straightforward way is to use addition:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 479, + 138, + 489 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 479, + 138, + 489 + ], + "spans": [ + { + "bbox": [ + 69, + 479, + 138, + 489 + ], + "type": "interline_equation", + "content": "[ 7 + 7 + 7 + 7 = 2 8 ]", + "image_path": "6ad24dea6df92c46ab5d9036c37f57d52f429e8462dee13df1dc56557b0ac1ba.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 491, + 480, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 491, + 480, + 501 + ], + "spans": [ + { + "bbox": [ + 69, + 491, + 480, + 501 + ], + "type": "text", + "content": "This expression shows that the number \"7\" is used four times in a row without any separators, resulting in the total of 28." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 503, + 130, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 503, + 130, + 517 + ], + "spans": [ + { + "bbox": [ + 69, + 503, + 130, + 517 + ], + "type": "interline_equation", + "content": "[ \\overline {{7 + 7 + 7 + 7}} ]", + "image_path": "67d5ef35d6644f2d755924bd7bebfba1f466e5a08a7281829c2a506289f6614b.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 233, + 529, + 359, + 539 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 529, + 359, + 539 + ], + "spans": [ + { + "bbox": [ + 233, + 529, + 359, + 539 + ], + "type": "text", + "content": "Table 20: Error Case for LRM." 
+ } + ] + } + ], + "index": 15, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 143, + 105, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 143, + 105, + 153 + ], + "spans": [ + { + "bbox": [ + 69, + 143, + 105, + 153 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 154, + 240, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 154, + 240, + 164 + ], + "spans": [ + { + "bbox": [ + 69, + 154, + 240, + 164 + ], + "type": "text", + "content": "输出一个动物的英文单词,并且只有三个字母" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 166, + 329, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 166, + 329, + 176 + ], + "spans": [ + { + "bbox": [ + 69, + 166, + 329, + 176 + ], + "type": "text", + "content": "Output an English word for an animal, and it must be only three letters long." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 178, + 124, + 187 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 178, + 124, + 187 + ], + "spans": [ + { + "bbox": [ + 69, + 178, + 124, + 187 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 189, + 279, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 189, + 279, + 199 + ], + "spans": [ + { + "bbox": [ + 69, + 189, + 279, + 199 + ], + "type": "text", + "content": "任何一个由三个字母组成的动物名称都可以(例如:cat)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 200, + 299, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 200, + 299, + 211 + ], + "spans": [ + { + "bbox": [ + 69, + 200, + 299, + 211 + ], + "type": "text", + "content": "Any animal name consisting of three letters is acceptable (e.g., cat)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 212, + 134, + 222 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 212, + 134, + 222 + ], + "spans": [ + { + "bbox": [ + 69, + 212, + 134, + 222 + ], + "type": "text", + "content": "Thinking process:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 224, + 524, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 224, + 524, + 259 + ], + "spans": [ + { + "bbox": [ + 69, + 224, + 524, + 259 + ], + "type": "text", + "content": "嗯,用户让我输出一个动物的英文单词,而且必须是三个字母的。首先,我得确定有哪些常见的动物是三个字母的。可能想到的有猫(Cat)、狗(Dog),不过这两个都是三个字母吗?Cat是C-A-T,确实是三个字母,Dog也是D-O-G,三个字母。那这两个应该都可以。不过用户可能需要更多的例子,或者只需要一个?题目里说的是“输出一个”,所以可能只需要一个。" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 261, + 524, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 261, + 524, + 290 + ], + "spans": [ + { + "bbox": [ + 69, + 261, + 524, + 290 + ], + "type": "text", + "content": 
"不过等等,可能还有其他选项,比如Ant(蚂蚁)、Ape(猿)、Bat(蝙蝠)、Bear(熊)是四个字母,所以不行。Fish是四个字母,Fox是三个字母,F-O-X,对,Fox也是三个字母。还有Lion是四个字母,不行。Tiger也是五个字母。Rat是三个字母,R-A-T。还有Sheep是五个字母,Cow是三个字母,C-O-W。所以可能的选择还挺多的。" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 292, + 524, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 292, + 524, + 312 + ], + "spans": [ + { + "bbox": [ + 69, + 292, + 524, + 312 + ], + "type": "text", + "content": "不过用户可能想要的是最常见的,比如Cat、Dog、Fox、Ant、Rat、Cow这些。需要确认这些单词是否都是正确的动物名称。比如Ant是蚂蚁,没错。Fox是狐狸,Rat是老鼠,Cow是奶牛。这些都是正确的。所以可能需要从这些里面选一个。" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 313, + 524, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 313, + 524, + 333 + ], + "spans": [ + { + "bbox": [ + 69, + 313, + 524, + 333 + ], + "type": "text", + "content": "不过用户可能希望答案只有一个,所以可能需要随机选一个,或者选最典型的。比如Cat和Dog是最常见的宠物,可能更容易被想到。但Fox也是一个不错的选择,可能更有趣一些。或者像Ape,虽然也是三个字母,但可能不如其他常见。" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 334, + 520, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 334, + 520, + 353 + ], + "spans": [ + { + "bbox": [ + 69, + 334, + 520, + 353 + ], + "type": "text", + "content": "另外,用户有没有可能有其他需求?比如是否区分大小写?题目里说是英文单词,通常首字母不大写,除非是专有名词。所以应该用小写,比如cat、dog。不过输出的时候可能需要大写首字母,但题目没有特别说明,可能保持小写即可。" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 354, + 524, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 524, + 391 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 524, + 391 + ], + "type": "text", + "content": "再检查一下有没有拼写错误,比如Bat是B-A-T,没错。Rat是R-A-T,正确。Cow是C-O-W,没错。所以这些都可以作为候选答案。现在需要从中选一个作为回答。可能用户希望得到多个例子,但题目要求输出一个,所以可能需要选最典型的。比如Cat或者Dog,因为这两个是最常见的宠物,可能更容易被用户想到。不过也有可能用户希望有变化,比如Fox这样的动物,比较有趣。" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 392, + 524, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 392, + 524, + 412 + ], + "spans": [ + { + "bbox": [ + 69, + 392, + 524, + 
412 + ], + "type": "text", + "content": "或者,用户可能是在测试知识库,看看是否知道除了Cat和Dog之外的其他三个字母的动物。比如Ant,虽然蚂蚁很小,但确实是动物。或者Rat,虽然老鼠可能不太受欢迎,但确实属于动物。Cow是家畜,可能也是合理的选择。" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 412, + 519, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 412, + 519, + 442 + ], + "spans": [ + { + "bbox": [ + 69, + 412, + 519, + 442 + ], + "type": "text", + "content": "所以,综合考虑,可能需要给出多个例子,但题目要求输出一个。这时候可能需要选择其中一个,比如Cat,或者Dog,或者Fox。例如,回答“Cat”或者“Dog”都是正确的。不过为了多样性,可能选Fox,因为不是所有人都会立刻想到它,而Cat和Dog比较基础。或者用户可能希望得到不同的答案,所以可以选其中一个,比如“Fox”作为回答。" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 444, + 524, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 444, + 524, + 480 + ], + "spans": [ + { + "bbox": [ + 68, + 444, + 524, + 480 + ], + "type": "text", + "content": "Well, the user asked me to output an English word for an animal, and it must be three letters long. First, I need to figure out which common animals have three letters. I can think of Cat and Dog, but are both of these three letters? Cat is C-A-T, and it is indeed three letters. Dog is D-O-G, also three letters. So both of these should be valid. But the user might need more examples, or just one? The task says \"output one,\" so it might only require one." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 480, + 524, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 480, + 524, + 505 + ], + "spans": [ + { + "bbox": [ + 68, + 480, + 524, + 505 + ], + "type": "text", + "content": "But wait, there could be other options, like Ant, Ape, Bat, and Bear, but Bear has four letters, so it doesn't count. Fish has four letters, too. Fox is three letters, F-O-X, yes, Fox is three letters. Lion has four letters, so that doesn't work. Tiger has five letters. Rat is three letters, R-A-T. Sheep has five letters. Cow is three letters, C-O-W. So there are quite a few possibilities." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 506, + 524, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 506, + 524, + 530 + ], + "spans": [ + { + "bbox": [ + 68, + 506, + 524, + 530 + ], + "type": "text", + "content": "However, the user might be looking for the most common ones, like Cat, Dog, Fox, Ant, Rat, and Cow. I need to check if these words are all correct animal names. For example, Ant is an ant, that's right. Fox is a fox, Rat is a rat, Cow is a cow. These are all correct. So we might need to choose one from these." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 68, + 531, + 524, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 531, + 524, + 555 + ], + "spans": [ + { + "bbox": [ + 68, + 531, + 524, + 555 + ], + "type": "text", + "content": "But the user may want only one answer, so I might need to pick randomly or choose the most typical one. For instance, Cat and Dog are the most common pets, so they might be easier to think of. But Fox is also a good choice and might be more interesting. Or something like Ape, though it's three letters, might not be as common as the others." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 68, + 555, + 524, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 555, + 524, + 581 + ], + "spans": [ + { + "bbox": [ + 68, + 555, + 524, + 581 + ], + "type": "text", + "content": "Also, is there any other requirement from the user? For instance, should I differentiate between uppercase and lowercase? The task says it's an English word, and usually, the first letter isn't capitalized unless it's a proper noun. So, it should be in lowercase, like cat, dog. However, when outputting, the first letter might need to be capitalized, but the task doesn't specify this, so lowercase should be fine." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 68, + 581, + 524, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 581, + 524, + 615 + ], + "spans": [ + { + "bbox": [ + 68, + 581, + 524, + 615 + ], + "type": "text", + "content": "Let's double-check for spelling errors: Bat is B-A-T, correct. Rat is R-A-T, correct. Cow is C-O-W, correct. So these can all be candidate answers. Now, I need to choose one as the response. The user might want several examples, but the task asks for only one, so I might need to pick the most typical one. For example, Cat or Dog, since these are the most common pets and might be easier for the user to think of. But Fox is also a good choice, as it's more interesting." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 68, + 616, + 524, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 616, + 524, + 640 + ], + "spans": [ + { + "bbox": [ + 68, + 616, + 524, + 640 + ], + "type": "text", + "content": "Or the user might be testing the knowledge base to see if it knows other three-letter animals besides Cat and Dog. For example, Ant, although ants are small, they are indeed animals. Or Rat, although rats might not be as popular, they are still animals. Cow is livestock, so that could also be a reasonable choice." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 68, + 641, + 524, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 641, + 524, + 674 + ], + "spans": [ + { + "bbox": [ + 68, + 641, + 524, + 674 + ], + "type": "text", + "content": "So, taking everything into account, I might need to give multiple examples, but the task requires one. In that case, I could choose one, like Cat, or Dog, or Fox. For instance, answering \"Cat\" or \"Dog\" would both be correct. However, for diversity, I might pick Fox, because not everyone will immediately think of it, whereas Cat and Dog are more basic. 
Or the user might want a different answer, so I could choose one of them, like \"Fox.\"" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 675, + 122, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 675, + 122, + 682 + ], + "spans": [ + { + "bbox": [ + 69, + 675, + 122, + 682 + ], + "type": "text", + "content": "Final Answer:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 686, + 104, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 686, + 104, + 694 + ], + "spans": [ + { + "bbox": [ + 69, + 686, + 104, + 694 + ], + "type": "text", + "content": "**Wolf**" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 233, + 709, + 358, + 720 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 709, + 358, + 720 + ], + "spans": [ + { + "bbox": [ + 233, + 709, + 358, + 720 + ], + "type": "text", + "content": "Table 21: Error Case for LRM." + } + ] + } + ], + "index": 26, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 124, + 271, + 135 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 124, + 271, + 135 + ], + "spans": [ + { + "bbox": [ + 69, + 124, + 271, + 135 + ], + "type": "text", + "content": "Categories: instruction following Model: hunyuan-T1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 302, + 803 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 107, + 333, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 107, + 333, + 119 + ], + "spans": [ + { + "bbox": [ + 69, + 107, + 333, + 119 + ], + "type": "text", + "content": "Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B" + 
} + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 127, + 105, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 127, + 105, + 137 + ], + "spans": [ + { + "bbox": [ + 69, + 127, + 105, + 137 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 139, + 240, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 139, + 240, + 149 + ], + "spans": [ + { + "bbox": [ + 69, + 139, + 240, + 149 + ], + "type": "text", + "content": "In which city is the Sydney Opera House located?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 151, + 124, + 160 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 151, + 124, + 160 + ], + "spans": [ + { + "bbox": [ + 69, + 151, + 124, + 160 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 163, + 96, + 174 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 163, + 96, + 174 + ], + "spans": [ + { + "bbox": [ + 69, + 163, + 96, + 174 + ], + "type": "text", + "content": "Sydney." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 175, + 134, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 175, + 134, + 186 + ], + "spans": [ + { + "bbox": [ + 69, + 175, + 134, + 186 + ], + "type": "text", + "content": "Thinking process:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 188, + 525, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 188, + 525, + 274 + ], + "spans": [ + { + "bbox": [ + 67, + 188, + 525, + 274 + ], + "type": "text", + "content": "Okay, so I need to figure out which city the Sydney Opera House is in. I know that Australia has a lot of cities, but I'm pretty sure the Opera House isn't in Sydney. Wait, isn't it in a different place? I think I've heard of a place called Petersham before, maybe that's where it's located. 
I remember seeing pictures of it in the news once, and they mentioned Petersham as the area where it's situated. But I'm not entirely sure if it's just a suburb or a major city. Maybe I should think about other major cities in Australia. There's Melbourne, which is known as the \"Melbourne\" city, and then there's Brisbane, which is more on the northern side. Then there's Sydney, which is the capital, but I don't think the Opera House is there. I think it's in a regional area instead of the capital city. So, putting it all together, I'm pretty confident that the Sydney Opera House is located in Petersham, which is a suburb in New South Wales, Australia. But I should double-check to make sure I'm not mixing up the names of places or cities. Maybe I can recall any famous landmarks in Petersham. I think there's a big shopping center there called the Petersham Shopping Centre, and I've heard that the Opera House is nearby. Yeah, that makes sense. So, I'm going to go with Petersham as the location." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 275, + 121, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 275, + 121, + 285 + ], + "spans": [ + { + "bbox": [ + 69, + 275, + 121, + 285 + ], + "type": "text", + "content": "Final Answer:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 288, + 379, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 288, + 379, + 298 + ], + "spans": [ + { + "bbox": [ + 69, + 288, + 379, + 298 + ], + "type": "text", + "content": "The Sydney Opera House is located in Petersham, a suburb in New South Wales, Australia." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 233, + 312, + 359, + 322 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 312, + 359, + 322 + ], + "spans": [ + { + "bbox": [ + 233, + 312, + 359, + 322 + ], + "type": "text", + "content": "Table 22: Error Case for LRM." 
+ } + ] + } + ], + "index": 9, + "type": "text" + }, + { + "bbox": [ + 69, + 403, + 333, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 403, + 333, + 414 + ], + "spans": [ + { + "bbox": [ + 69, + 403, + 333, + 414 + ], + "type": "text", + "content": "Categories: knowledge question Model: DeepSeek-R1-Distill-Qwen-7B" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 423, + 105, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 423, + 105, + 433 + ], + "spans": [ + { + "bbox": [ + 69, + 423, + 105, + 433 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 434, + 267, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 434, + 267, + 444 + ], + "spans": [ + { + "bbox": [ + 69, + 434, + 267, + 444 + ], + "type": "text", + "content": "Who composed *Symphony No. 9 in D minor, Op. 125*?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 446, + 124, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 446, + 124, + 456 + ], + "spans": [ + { + "bbox": [ + 69, + 446, + 124, + 456 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 459, + 149, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 459, + 149, + 469 + ], + "spans": [ + { + "bbox": [ + 69, + 459, + 149, + 469 + ], + "type": "text", + "content": "Ludwig van Beethoven" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 470, + 134, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 470, + 134, + 481 + ], + "spans": [ + { + "bbox": [ + 69, + 470, + 134, + 481 + ], + "type": "text", + "content": "Thinking process:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 483, + 525, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 483, + 525, + 509 + ], + "spans": [ + 
{ + "bbox": [ + 67, + 483, + 525, + 509 + ], + "type": "text", + "content": "Alright, so I need to figure out who composed Symphony No. 9 in D minor, Op. 125. Let me start by recalling what I know about classical music composers. I know that there are several famous composers, but I'm trying to remember if it's someone like Beethoven, Mozart, or perhaps someone from a later era." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 509, + 524, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 509, + 524, + 527 + ], + "spans": [ + { + "bbox": [ + 67, + 509, + 524, + 527 + ], + "type": "text", + "content": "Wait, Beethoven wrote many symphonies, but I think he didn't write Symphony No. 9 in D minor. Let me think. Beethoven's 9th symphony is actually in E flat minor, Op. 67, right? So that's not it. Then there's Beethoven's Moonlight Sonata, which is different." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 527, + 524, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 527, + 524, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 527, + 524, + 552 + ], + "type": "text", + "content": "Mozart composed a lot of piano concertos, but I don't think he wrote a symphony in D minor. His compositions are usually in various key signatures, but D minor isn't one I associate with him. Then there's Schubert, who wrote the famous symphonies, but I don't recall him composing a symphony in D minor either." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 552, + 524, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 552, + 524, + 569 + ], + "spans": [ + { + "bbox": [ + 67, + 552, + 524, + 569 + ], + "type": "text", + "content": "Wait, maybe it's someone else. I've heard of Igor Stravinsky before, but he was more known for his operas like The Rite of Spring. Then there's Paul Simon, but he's a contemporary composer, so probably not." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 67, + 569, + 524, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 569, + 524, + 586 + ], + "spans": [ + { + "bbox": [ + 67, + 569, + 524, + 586 + ], + "type": "text", + "content": "Oh, maybe it's someone from the classical era. Let me think about other composers. There's Shostakovich, but I don't think he wrote that. What about someone like Tchaikovsky? No, he wrote symphonies in C minor and F major, but not D minor." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 586, + 524, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 524, + 611 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 524, + 611 + ], + "type": "text", + "content": "Hmm, maybe it's someone more obscure. I think I've heard of a composer named Igor Kovalsky. Wait, was that the composer of this symphony? I'm not entirely sure, but I think he's associated with writing complex symphonies. Alternatively, could it be someone like Shoenfield? I'm not certain." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 67, + 611, + 524, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 524, + 636 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 524, + 636 + ], + "type": "text", + "content": "Wait, I think I remember that Igor Kovalsky wrote this symphony. It's sometimes referred to as \"The New World\" because of its emotional depth and complexity. The symphony is in D minor, Op. 125, and it's known for its intense, dramatic sound. I think that's correct, but I should double-check to make sure it's not another composer." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 67, + 636, + 524, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 636, + 524, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 636, + 524, + 661 + ], + "type": "text", + "content": "Another thought: maybe it's someone from the 19th century. 
I'm trying to recall if any composers from that era wrote such a symphony. I'm not sure, but I think Igor Kovalsky is the one. He was an American composer, so that makes sense as a modern touch in classical music." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 67, + 661, + 524, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 524, + 679 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 524, + 679 + ], + "type": "text", + "content": "So, putting it all together, I believe the composer is Igor Kovalsky, and the symphony is known for its dramatic and intense sound. I think that's the correct answer, but I should confirm if there's any other possibility I might be forgetting." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 681, + 122, + 691 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 681, + 122, + 691 + ], + "spans": [ + { + "bbox": [ + 69, + 681, + 122, + 691 + ], + "type": "text", + "content": "Final Answer:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 67, + 693, + 524, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 693, + 524, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 693, + 524, + 711 + ], + "type": "text", + "content": "The composer of Symphony No. 9 in D minor, Op. 125, is Igor Kovalsky. This symphony is often recognized for its dramatic and intense sound, sometimes referred to as \"The New World.\"" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 232, + 725, + 358, + 736 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 725, + 358, + 736 + ], + "spans": [ + { + "bbox": [ + 232, + 725, + 358, + 736 + ], + "type": "text", + "content": "Table 23: Error Case for LRM." 
+ } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 792, + 303, + 803 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 246, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 81, + 246, + 93 + ], + "spans": [ + { + "bbox": [ + 69, + 81, + 246, + 93 + ], + "type": "text", + "content": "Categories: reasoning question Model: s1.1-7B" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 101, + 105, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 101, + 105, + 111 + ], + "spans": [ + { + "bbox": [ + 69, + 101, + 105, + 111 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 113, + 130, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 113, + 130, + 122 + ], + "spans": [ + { + "bbox": [ + 69, + 113, + 130, + 122 + ], + "type": "text", + "content": "1.5加3.5等于几?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 124, + 142, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 124, + 142, + 133 + ], + "spans": [ + { + "bbox": [ + 69, + 124, + 142, + 133 + ], + "type": "text", + "content": "What is 1.5 plus 3.5?" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 136, + 124, + 145 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 136, + 124, + 145 + ], + "spans": [ + { + "bbox": [ + 69, + 136, + 124, + 145 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 148, + 75, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 148, + 75, + 157 + ], + "spans": [ + { + "bbox": [ + 69, + 148, + 75, + 157 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 160, + 133, + 169 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 160, + 133, + 169 + ], + "spans": [ + { + "bbox": [ + 69, + 160, + 133, + 169 + ], + "type": "text", + "content": "Thinking process:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 171, + 485, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 171, + 485, + 181 + ], + "spans": [ + { + "bbox": [ + 69, + 171, + 485, + 181 + ], + "type": "text", + "content": "好的,我需要计算1.5加3.5等于多少。首先,这两个数都是小数,处理起来相对简单。我可以在心里快速算一下。" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "spans": [ + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "text", + "content": "首先,1.5加上3.5。可以将它们看成是 " + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "inline_equation", + "content": "1 + 0.5 + 3 + 0.5" + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "text", + "content": " 。这样就可以先将整数部分相加,再将小数部分相加。 " + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "inline_equation", + "content": "1 + 3 = 4" + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "text", + "content": " , " + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "inline_equation", + "content": "0.5 + 0.5 = 1" + }, + { + "bbox": [ + 
69, + 182, + 523, + 200 + ], + "type": "text", + "content": " 。然后把两部分加起来是 " + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "inline_equation", + "content": "4 + 1 = 5" + }, + { + "bbox": [ + 69, + 182, + 523, + 200 + ], + "type": "text", + "content": " 。所以答案是5。" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 201, + 523, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 523, + 219 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 523, + 219 + ], + "type": "text", + "content": "另一种方法是将它们视为货币金额。比如,1.5美元加3.5美元。1.5美元加上3美元是4.5美元,再加上0.5美元就是5美元。这也得到同样的结果。" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 220, + 523, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 220, + 523, + 238 + ], + "spans": [ + { + "bbox": [ + 69, + 220, + 523, + 238 + ], + "type": "text", + "content": "当然,也可以用电子计算器来验证,但我认为手动计算更快。不过,为了确保准确,我再检查一遍。1.5加3.5。10加35等于45,然后小数点一位,所以是4.5。对的," + }, + { + "bbox": [ + 69, + 220, + 523, + 238 + ], + "type": "inline_equation", + "content": "1.5 + 3.5 = 4.5" + }, + { + "bbox": [ + 69, + 220, + 523, + 238 + ], + "type": "text", + "content": "。或者用竖式计算也可以:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 240, + 81, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 240, + 81, + 248 + ], + "spans": [ + { + "bbox": [ + 69, + 240, + 81, + 248 + ], + "type": "text", + "content": "1.5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 248, + 85, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 248, + 85, + 257 + ], + "spans": [ + { + "bbox": [ + 69, + 248, + 85, + 257 + ], + "type": "text", + "content": "+3.5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 262, + 92, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 262, + 92, + 273 + ], + "spans": [ + { + "bbox": [ + 69, + 262, + 92, + 273 + ], + "type": "text", + "content": "4.5" + } + ] + } + ], 
+ "index": 13 + }, + { + "bbox": [ + 69, + 274, + 358, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 274, + 358, + 282 + ], + "spans": [ + { + "bbox": [ + 69, + 274, + 358, + 282 + ], + "type": "text", + "content": "对,这样一看也确实是4.5。所以答案应该是4.5。我之前得到的结果是正确的。" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 284, + 524, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 284, + 524, + 301 + ], + "spans": [ + { + "bbox": [ + 69, + 284, + 524, + 301 + ], + "type": "text", + "content": "Alright, I need to calculate what 1.5 plus 3.5 equals. First, both of these numbers are decimals, so it's relatively simple to handle. I can quickly calculate it in my head." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "spans": [ + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "text", + "content": "First, 1.5 plus 3.5. You can think of them as " + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "inline_equation", + "content": "1 + 0.5 + 3 + 0.5" + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "text", + "content": ". This way, I can add the integer parts first, then the decimal parts. " + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "inline_equation", + "content": "1 + 3 = 4" + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "inline_equation", + "content": "0.5 + 0.5 = 1" + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "text", + "content": ". Then, adding the two parts together gives " + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "inline_equation", + "content": "4 + 1 = 5" + }, + { + "bbox": [ + 69, + 302, + 524, + 318 + ], + "type": "text", + "content": ". So, the answer is 5." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 319, + 524, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 319, + 524, + 335 + ], + "spans": [ + { + "bbox": [ + 69, + 319, + 524, + 335 + ], + "type": "text", + "content": "Another way is to treat them as currency amounts. For example, 1.5 dollars plus 3.5 dollars. 1.5 dollars plus 3 dollars equals 4.5 dollars, then adding 0.5 dollars gives 5 dollars. This also leads to the same result." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 336, + 524, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 336, + 524, + 360 + ], + "spans": [ + { + "bbox": [ + 69, + 336, + 524, + 360 + ], + "type": "text", + "content": "Of course, you could use a calculator to verify, but I think doing it manually is quicker. However, to ensure accuracy, I will check again. 1.5 plus 3.5. 10 plus 35 equals 45, then move the decimal point one place, so it's 4.5. Yes, " + }, + { + "bbox": [ + 69, + 336, + 524, + 360 + ], + "type": "inline_equation", + "content": "1.5 + 3.5 = 4.5" + }, + { + "bbox": [ + 69, + 336, + 524, + 360 + ], + "type": "text", + "content": ". 
Or you could use column addition:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 361, + 84, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 361, + 84, + 369 + ], + "spans": [ + { + "bbox": [ + 69, + 361, + 84, + 369 + ], + "type": "text", + "content": "1.5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 370, + 85, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 370, + 85, + 377 + ], + "spans": [ + { + "bbox": [ + 69, + 370, + 85, + 377 + ], + "type": "text", + "content": "+3.5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 378, + 92, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 378, + 92, + 384 + ], + "spans": [ + { + "bbox": [ + 69, + 378, + 92, + 384 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 386, + 81, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 386, + 81, + 394 + ], + "spans": [ + { + "bbox": [ + 69, + 386, + 81, + 394 + ], + "type": "text", + "content": "4.5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 395, + 456, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 395, + 456, + 403 + ], + "spans": [ + { + "bbox": [ + 69, + 395, + 456, + 403 + ], + "type": "text", + "content": "Yes, looking at it this way, it indeed adds up to 4.5. So the answer should be 4.5. The result I got earlier is correct." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 405, + 121, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 405, + 121, + 414 + ], + "spans": [ + { + "bbox": [ + 69, + 405, + 121, + 414 + ], + "type": "text", + "content": "Final Answer:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 417, + 115, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 417, + 115, + 429 + ], + "spans": [ + { + "bbox": [ + 69, + 417, + 115, + 429 + ], + "type": "text", + "content": "答案是 4.5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 433, + 137, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 433, + 137, + 445 + ], + "spans": [ + { + "bbox": [ + 69, + 433, + 137, + 445 + ], + "type": "text", + "content": "The answer is 4.5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 233, + 458, + 359, + 468 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 458, + 359, + 468 + ], + "spans": [ + { + "bbox": [ + 233, + 458, + 359, + 468 + ], + "type": "text", + "content": "Table 24: Error Case for LRM." 
+ } + ] + } + ], + "index": 27, + "type": "text" + }, + { + "bbox": [ + 69, + 497, + 329, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 497, + 329, + 508 + ], + "spans": [ + { + "bbox": [ + 69, + 497, + 329, + 508 + ], + "type": "text", + "content": "Categories: analysis question Model: DeepSeek-R1-Distill-Qwen-1.5B" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 69, + 517, + 105, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 517, + 105, + 526 + ], + "spans": [ + { + "bbox": [ + 69, + 517, + 105, + 526 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 69, + 528, + 326, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 528, + 326, + 539 + ], + "spans": [ + { + "bbox": [ + 69, + 528, + 326, + 539 + ], + "type": "text", + "content": "In the sentence \"Napoleon died in 1821\", is \"1821\" a time or a place name?" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 69, + 540, + 124, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 540, + 124, + 550 + ], + "spans": [ + { + "bbox": [ + 69, + 540, + 124, + 550 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 69, + 553, + 90, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 553, + 90, + 562 + ], + "spans": [ + { + "bbox": [ + 69, + 553, + 90, + 562 + ], + "type": "text", + "content": "Time." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 69, + 565, + 133, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 565, + 133, + 575 + ], + "spans": [ + { + "bbox": [ + 69, + 565, + 133, + 575 + ], + "type": "text", + "content": "Thinking process:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 69, + 577, + 524, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 577, + 524, + 595 + ], + "spans": [ + { + "bbox": [ + 69, + 577, + 524, + 595 + ], + "type": "text", + "content": "Okay, so I have this sentence: \"Napoleon died in 1821.\" I need to figure out whether \"1821\" is a time or a place name. Hmm, let me think about this step by step." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 69, + 596, + 524, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 596, + 524, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 596, + 524, + 620 + ], + "type": "text", + "content": "First, I know that \"Napoleon\" is a person, specifically a historical figure. So, if it's the name of a person, then \"1821\" must be related to their life. I'm pretty sure that \"1821\" is a year. I remember that Napoleon was born in 1769 and died in 1821. So, 1821 is definitely a year." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 69, + 620, + 524, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 620, + 524, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 620, + 524, + 645 + ], + "type": "text", + "content": "Wait, but what if I'm not sure? Let me consider other possibilities. Sometimes names can be place names or time periods. For example, if someone's name were a city or a country, the number could be a place. But in this case, \"Napoleon\" is a person, so it's more likely a year." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 69, + 645, + 524, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 645, + 524, + 671 + ], + "spans": [ + { + "bbox": [ + 69, + 645, + 524, + 671 + ], + "type": "text", + "content": "Also, thinking about the sentence structure, it's written as \"Napoleon died in 1821.\" The structure \"in\" suggests that 1821 is the place. So, in this context, \"in\" is an article that indicates the location, which would be a place name. That makes me think that \"1821\" is indeed a place name, the year." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 69, + 671, + 524, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 671, + 524, + 687 + ], + "spans": [ + { + "bbox": [ + 69, + 671, + 524, + 687 + ], + "type": "text", + "content": "I don't think it's a time because time is usually expressed as something like \"in the 19th century\" or \"2023.\" So, 1821 is specifically a year, not a time period. It's more precise because it's a specific year when Napoleon died." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 69, + 688, + 524, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 688, + 524, + 705 + ], + "spans": [ + { + "bbox": [ + 69, + 688, + 524, + 705 + ], + "type": "text", + "content": "So, putting it all together, \"Napoleon\" is a person, and \"in 1821\" is indicating the year he died. Therefore, \"1821\" is a place name, specifically a year." 
+ } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 69, + 708, + 121, + 716 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 121, + 716 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 121, + 716 + ], + "type": "text", + "content": "Final Answer:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 69, + 719, + 524, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 719, + 524, + 738 + ], + "spans": [ + { + "bbox": [ + 69, + 719, + 524, + 738 + ], + "type": "text", + "content": "In the sentence \"Napoleon died in 1821,\" the number \"1821\" is a place name, specifically a year. It is used to indicate the year Napoleon died, which is 1821. The use of \"in\" confirms that it refers to a place, not a time period. Therefore, \"1821\" is a year." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 233, + 752, + 359, + 762 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 752, + 359, + 762 + ], + "spans": [ + { + "bbox": [ + 233, + 752, + 359, + 762 + ], + "type": "text", + "content": "Table 25: Error Case for LRM." 
+ } + ] + } + ], + "index": 42, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 793, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 793, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 290, + 793, + 302, + 803 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 43 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 30 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_content_list.json b/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..351dab558cac3588935dc6779c2a0930ef193876 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_content_list.json @@ -0,0 +1,4938 @@ +[ + { + "type": "text", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "text_level": 1, + "bbox": [ + 111, + 109, + 859, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Parshin Shojaee* 1 Ngoc-Hieu Nguyen* 2 Kazem Meidani 34 Amir Barati Farimani 3 Khoa D Doan 2 Chandan K Reddy 1", + "bbox": [ + 181, + 198, + 787, + 236 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Website: https://github.com/deep-symbolic-mathematics/llm-srbench", + "bbox": [ + 176, + 239, + 792, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 241, + 281, + 320, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Scientific equation discovery has long been a cornerstone of scientific progress, enabling the derivation of laws governing natural phenomena. Recently, Large Language Models (LLMs) have gained interest for this task due to their potential to leverage embedded scientific knowledge for hypothesis generation. 
However, it is difficult to assess the true discovery capabilities of these methods because existing benchmarks often use well-known equations. This makes them vulnerable to memorization by LLMs and results in inflated performance metrics that do not reflect genuine discovery. In this paper, we introduce LLM-SRBench, a comprehensive benchmark with 239 challenging problems across four scientific domains specifically designed to evaluate LLM-based scientific equation discovery methods while preventing trivial memorization. Our benchmark comprises two main categories: LSR-Transform, which transforms common physical models into less common mathematical representations to test reasoning beyond memorized forms, and LSR-Synth, which introduces synthetic, discovery-driven problems requiring data-driven reasoning. Through extensive evaluation of several state-of-the-art methods, using both open and closed LLMs, we find that the best-performing system so far achieves only $31.5\\%$ symbolic accuracy. These findings highlight the challenges of scientific equation discovery, positioning LLM-SRBench as a valuable resource for future research.", + "bbox": [ + 117, + 303, + 444, + 787 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/29b608f1ad835a7b4b3cf0055c416cc691294f5fa75a973e1ef9650ca2acd5dc.jpg", + "image_caption": [ + "Figure 1. Error analysis comparing simple LLM sampling (Llama-3.1-8B) on 100 Feynman problems versus LLM-SRBench datasets (LSR-Transform and LSR-Synth). The sharp drops in numeric error curves and considerably lower symbolic error for Feynman problems suggest memorization rather than gradual discovery." + ], + "image_footnote": [], + "bbox": [ + 513, + 282, + 870, + 436 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 498, + 561, + 629, + 575 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Equation discovery, the process of uncovering symbolic mathematical expressions from observational data, has been a cornerstone of scientific advancement. This task, also known as symbolic regression (SR), goes beyond mere data-driven predictive modeling by seeking interpretable mathematical relations that reveal the underlying mechanisms of natural phenomena. When scientists derive mathematical equations from empirical data, they gain more than just predictive power – they obtain insights into fundamental physical principles, enable extrapolation beyond observed data, and facilitate knowledge transfer across scientific domains (Langley, 1981; Schmidt & Lipson, 2009).", + "bbox": [ + 495, + 580, + 887, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Standard approaches to equation discovery have primarily relied on genetic programming (GP) and evolutionary algorithms (Cranmer, 2023; La Cava et al., 2021), which represent mathematical expressions as trees and navigate the vast space of possible equations through evolutionary search techniques. However, these methods face two fundamental challenges. First, the NP-hard nature of equation discovery (Virgolin & Pissis, 2022) makes their random mutation and crossover operations computationally prohibitive across", + "bbox": [ + 495, + 768, + 888, + 905 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution ¹Virginia Tech ²VinUniversity ³Carnegie Mellon University ⁴Capital One. Correspondence to: Parshin Shojaee .", + "bbox": [ + 84, + 814, + 473, + 854 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. 
Copyright 2025 by the author(s).", + "bbox": [ + 84, + 864, + 473, + 905 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10415v2 [cs.CL] 7 Jun 2025", + "bbox": [ + 22, + 285, + 57, + 710 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/94c039b1cedc695be2873c2dcb0437ad37c78b74f2f5c36f13aca7b21cf0c837.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 147, + 84, + 165, + 101 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Goal / Instruction", + "text_level": 1, + "bbox": [ + 176, + 87, + 269, + 97 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Discover the mathematical equation/law that describes [output variable] based on given [input features].", + "bbox": [ + 143, + 103, + 326, + 130 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Use domain-specific knowledge of [the scientific field] and provided data samples to find an equation that is scientifically valid and fits the data well.", + "bbox": [ + 143, + 130, + 331, + 165 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/16fbc11c1ac7574b7d1a59cd72413c0b5c055e02f015f88a1e7a9732b7b601d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 143, + 174, + 165, + 191 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Scientific Context", + "text_level": 1, + "bbox": [ + 176, + 179, + 271, + 188 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Problem description", + "bbox": [ + 158, + 196, + 246, + 205 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Variable names and descriptions", + "Example:" + ], + "bbox": [ + 142, + 205, + 300, + 223 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Find an equation in the field of classical mechanics that describes the mass $(m)$ needed to store energy in an 
oscillating system, given physical input variables: mean stored energy $(E_{m})$ , driving frequency $(\\omega)$ , natural frequency $(\\omega_{n})$ and amplitude $(x)$ .", + "bbox": [ + 147, + 226, + 331, + 263 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/dd4ace02bfdbbc327dd22c6ca51c918e6611e6e1a0a6dffb9ff8c4f2e067743e.jpg", + "image_caption": [ + "Figure 2. Overview of the LLM-based Scientific Equation Discovery. The benchmark tasks (left) combine scientific context with numerical data. The discovery process (middle) iteratively leverages LLM's scientific knowledge and data-driven reasoning to generate hypotheses for underlying equations. Discovered hypotheses, represented as equation strings, trees, or programs, are then evaluated (right) using multiple metrics including data fidelity, symbolic accuracy, and computational efficiency." + ], + "image_footnote": [], + "bbox": [ + 148, + 275, + 165, + 291 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/e0d63bd33ab92d526518753edc49274d6d59f20397bf8d0354c580b9853762fa.jpg", + "table_caption": [ + "Data" + ], + "table_footnote": [], + "table_body": "
\\( E_{\\mathrm {n}} \\)ω\\( \\omega_0 \\)xm
4.71.22.31.51.2
3.42.72.73.10.1
i
2.81.53.61.40.4
", + "bbox": [ + 140, + 295, + 336, + 349 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/eaaa041fd9512203f4e34c4bec9c0a6c971fa1e475a5cc93922305ebde0dc5dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 364, + 83, + 393, + 103 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Typical Workflow", + "text_level": 1, + "bbox": [ + 398, + 87, + 490, + 97 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b4248be90e578b449703e07ee0bf472829ee92a629bf290e7b43622f53874a51.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 411, + 112, + 433, + 128 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "LLM internal scientific knowledge", + "bbox": [ + 441, + 112, + 537, + 132 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2943312887403cbd3614ec26d86312c9ed3797876e4b24a507d8b8ccaf08be34.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 133, + 433, + 148 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Reasoning and planning", + "bbox": [ + 437, + 137, + 542, + 147 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/83fdb943052af6819deb8c9f9e526ad919dcda5736b655b45a9e19dc080f02a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 411, + 155, + 434, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Programming", + "bbox": [ + 450, + 157, + 509, + 167 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a7c6d759afe9fd77afb493d4e3d1f4cb1fd3debd494f956b9c7e6f32ff6755e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 180, + 584, + 273 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a2d3aa7e47027c5fc32383269784b60c4e30210c9a1ddb79ea5b1ae367be0d02.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 279, + 437, + 299 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Parameter Optimization", + 
"bbox": [ + 441, + 284, + 544, + 292 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bb81ea4b5f5d5a4f975ff0466f5ed65eeae3a9584feb539e69d440e2f25d1471.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 411, + 304, + 434, + 320 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Simulation", + "bbox": [ + 437, + 308, + 485, + 316 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/17beb428c63f2aabd0dfe88d4ab8d11372d076979bffbc2f467162f6659a2623.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 303, + 509, + 318 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Experiments", + "bbox": [ + 504, + 306, + 558, + 316 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4336f60ac23a31b7303fd2bcd302d1a07db5ad0fb5084d34cb24e039edff4412.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 324, + 442, + 340 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Statistical Fit to Data", + "bbox": [ + 446, + 330, + 535, + 338 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b2f081c94fa3b127bfd848223e598ebab8dca7669779b99dbbb482707e9719d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 637, + 84, + 658, + 102 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Hypothesis", + "text_level": 1, + "bbox": [ + 671, + 87, + 733, + 97 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Discovered mathematical equation represented by expressions, trees, programs, etc.", + "bbox": [ + 622, + 107, + 833, + 125 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Supporting explanations / reasoning", + "bbox": [ + 622, + 133, + 784, + 142 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "$m = 4^{*}E n / (x^{**}2^{*}$", + "bbox": [ + 635, + 152, + 717, + 160 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(omega\\*\\*2 + omega_0\\*\\*2))", + "bbox": [ + 635, + 161, + 
746, + 169 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e19abcc9793b69233bc2068d01085943b55800e6c5368956853e58e8c2357a1e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 143, + 834, + 191 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "def equation(E_n, omega, omega_0, x, params): # Energy-mass ratio normalized by parameter numerator $=$ params[0] $\\ast$ E n", + "bbox": [ + 625, + 185, + 803, + 204 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Combined frequency and amplitude scaling effects denominator $=$ omega++2 $\\ast$ x++2 + omega_0**2 $\\ast$ x**2 m $=$ numerator / denominator return m", + "bbox": [ + 640, + 205, + 831, + 231 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Evaluation", + "text_level": 1, + "bbox": [ + 663, + 250, + 720, + 258 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Data Fidelity:", + "In-Domain accuracy", + "- Out-of-Domain generalization" + ], + "bbox": [ + 640, + 267, + 807, + 292 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Symbolic Accuracy:", + "bbox": [ + 640, + 299, + 750, + 308 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Human expert/LLM evaluator", + "Scientific plausibility", + "Interpretability" + ], + "bbox": [ + 663, + 309, + 810, + 334 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Computational Efficiency", + "bbox": [ + 640, + 340, + 772, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "vast search spaces. Second, unlike human scientists who leverage their domain knowledge and expertise to guide hypothesis formation, these approaches are mostly purely data-driven, and isolated from existing scientific knowledge. 
These limitations have motivated researchers to develop methods that incorporate scientific domain knowledge into the equation discovery process.", + "bbox": [ + 84, + 454, + 475, + 560 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) have recently emerged as a promising solution to these challenges, offering a new paradigm for scientific equation discovery. LLMs, trained on vast corpora of scientific literature, possess extensive embedded scientific knowledge. This has sparked significant interest in leveraging LLMs for scientific equation discovery, with several recent works demonstrating their potential (Shojae et al., 2024b; Ma et al., 2024; Grayeli et al., 2024; Merler et al., 2024; Du et al., 2024; Reddy & Shojaee, 2024; Zhang et al., 2024). These LLM-based approaches have shown to enhance the equation hypothesis generation process by incorporating scientific priors, guiding the exploration of equation search spaces more efficiently, and providing interpretable reasoning for the search process.", + "bbox": [ + 84, + 568, + 475, + 779 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite the promising potential of LLM-based equation discovery methods, their rigorous and robust evaluation still remains an open challenge. The current scientific equation discovery benchmarks are primarily represented by SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022). SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu", + "bbox": [ + 84, + 786, + 475, + 892 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "& Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for data points. 
However, these benchmarks exhibit significant limitations for the evaluation of LLM-based methods. Their problems are mostly based on known physics equations from textbooks, which makes them often subject to memorization by LLMs.", + "bbox": [ + 495, + 454, + 887, + 590 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As noted by (Shojaaee et al., 2024b), LLMs frequently succeed on these common equation discovery benchmarks through simple recitation based on variable names and problem descriptions, rather than the actual process of data-driven discovery and reasoning. Our analysis (shown in Fig. 1) also confirms this finding - the sudden drop in the numeric error curve within the first few iterations and significantly lower symbolic error on Feynman problems indicate memorized solutions rather than a meaningful search towards discovery. To mitigate this issue, (Shojaaee et al., 2024b; Ma et al., 2024) have introduced a handful of five custom-crafted problems designed to prevent memorization by manually modifying known physical models. While these efforts represent a step forward, the small scale and limited diversity of these problem sets are insufficient to provide a comprehensive evaluation framework for emerging LLM-based methods in scientific equation discovery. 
A more robust and systematic benchmark is needed to enable standardized evaluation and foster the development of innovative methods in this emerging field.", + "bbox": [ + 495, + 598, + 888, + 900 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we introduce LLM-SRBench, a new benchmark designed to rigorously evaluate the capabilities of LLM-based scientific equation discovery methods. LLM-SRBench addresses the limitations of existing benchmarks by constructing problem sets that avoid trivial recitation while leveraging the scientific priors embedded in LLMs, simulating conditions akin to scientific discovery. The benchmark is structured around two main categories of problems, each targeting distinct aspects of equation discovery. The first category focuses on transforming common scientific problems, such as those from the Feynman equations, into different mathematical representations of the same underlying physical problem. By symbolically altering input-output mappings and generating less common mathematical forms for the same problem, we challenge LLM-based equation discovery to go beyond memorization of the common forms. This approach is motivated by recent findings on the fragility of LLMs' reasoning capabilities to unfamiliar representations of otherwise familiar problems (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). The second category extends the approach introduced by (Shojae et al., 2024b), which combines known terms in the underlying equation with synthetic, novel terms to create problems that go beyond memorization and demand data-driven reasoning. 
We expand this idea into a comprehensive set of benchmark problems spanning diverse scientific domains. These problems incorporate carefully designed synthetic terms that are both novel and plausible. We further verify the solvability of the generated equations using numerical solvers, ensuring that the benchmark problems remain grounded in physical feasibility while presenting meaningful challenges for LLM-based discovery methods.", + "bbox": [ + 84, + 85, + 475, + 568 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLM-SRBench comprises 111 problems in the first category (LSR-Transform), and 128 problems in the second category (LSR-Synth), spanning four scientific domains: chemistry (36), biology (24), physics (43), and material science (25). We comprehensively benchmark state-of-the-art LLM-based scientific equation discovery methods with several LLM backbones on these datasets. Our experiments reveal several key insights into the capabilities and limitations of current LLM-based scientific equation discovery methods. Results show that the best model can only solve $31.5\\%$ of problems on LSR-Transform and $28.1\\%$ on LSR-Synth. This underscores the challenging nature of the tasks in LLM-SRBench and highlights its potential as a critical evaluation foundation for future LLM-based scientific equation discovery methods. 
Overall, the contributions of this work are as follows:", + "bbox": [ + 84, + 575, + 477, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We introduce LLM-SRBench, the first comprehensive benchmark with 239 challenging problems across various scientific domains, designed to evaluate LLM-based scientific equation discovery methods.", + "bbox": [ + 84, + 844, + 477, + 906 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel benchmark design through alternative mathematical representations (LSR-Transform) and synthetic, discovery-driven problems (LSR-Synth) to ensure rigorous evaluation of scientific reasoning and discovery capabilities beyond LLM memorization.", + "- Extensive experiments on state-of-the-art methods reveal performance peaks at $31\\%$ , highlighting the benchmark's challenging nature and its potential for future research." + ], + "bbox": [ + 496, + 84, + 885, + 213 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. LLM-SRBench", + "text_level": 1, + "bbox": [ + 496, + 234, + 651, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We introduce LLM-SRBench, a novel benchmark designed to evaluate LLM-based methods for data-driven scientific equation discovery. As shown in Fig. 2, in this benchmark, a \"data-driven scientific equation discovery\" task is defined as follows: Given a task dataset $\\mathcal{D}$ , the corresponding scientific context $\\mathcal{C}$ , the objective is to derive a hypothesis $h$ that represents the underlying mathematical relations behind the data with high precision and scientific plausibility. This process resembles the iterative search and refinement undertaken by human scientists, where LLMs act as optimizers, proposing and refining hypotheses based on both scientific knowledge and empirical data.", + "bbox": [ + 495, + 260, + 887, + 441 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. 
LSR-Transform", + "text_level": 1, + "bbox": [ + 496, + 452, + 643, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This category is designed to evaluate whether LLM-based methods can discover equations in less common mathematical forms, avoiding reliance on memorization of well-known representations. This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). By transforming existing benchmark problems into different mathematical representations, we challenge LLMs' capabilities in data-driven scientific equation discovery and reasoning.", + "bbox": [ + 495, + 474, + 888, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We build on the Feynman (Udrescu & Tegmark, 2020) benchmark (current standard benchmark in scientific equation discovery), which consists of 100 physics equations, and systematically transform these equations into alternative mathematical forms (examples in App. A.1). As demonstrated in Fig. 3(a), the transformation process involves seven key steps: 1) Equation Collection: We gather the original mathematical expressions, along with their input and output variables, and scientific problem descriptions from the Feynman benchmark. 2) Select Pivot Variable: For each equation, we choose an input feature to become the new target variable. 3) Feature-Target Transformation: We transform the dataset by switching the roles of the selected input feature and the original target variable. 
4) Symbolic Transformation: Using the SymPy library in Python on the parsed expressions, we solve each equation with respect to the selected input variable, treating it", + "bbox": [ + 495, + 648, + 888, + 905 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f71f46fe53f6fb3b1b827b582fcd45a5f92ea2842866c6ce5dcdfaf13dedb8d8.jpg", + "image_caption": [ + "(a) LSR-Transform" + ], + "image_footnote": [], + "bbox": [ + 125, + 84, + 478, + 354 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4c620bd69fe9f710128e1e41b30a46b9f571f0d7ea8754a51132638fc907b0c9.jpg", + "image_caption": [ + "(b) LSR-Synth", + "Figure 3. Data generation pipelines for the two dataset categories in LLM-SRBench. (a) LSR-Transform converts Feynman problems into alternative mathematical forms through symbolic transformation and input-output role switching, and (b) LSR-Synth generates novel discovery-driven problems by combining known scientific terms in the underlying models with synthetic novel terms. Both pipelines include validation steps to ensure solvability and scientific plausibility." + ], + "image_footnote": [], + "bbox": [ + 486, + 83, + 843, + 353 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "as the new output and the original output variable as an input in the transformed equation. 5) Solvability Check: We retain only those transformations that are analytically solvable, ensuring the feasibility of the resulting equations. 
6) Dataset Refinement: For the transformed equations with altered data domains (e.g., due to square roots or denominators), we filter the original Feynman dataset to ensure all data points fall within the valid domains of the new equations. 7) Problem Reformulation: Using LLM (GPT4o), we generate a new natural language specification for each transformed problem. During this data generation process, we constrain the transformed equations' complexity (measured by expression tree node count) to the range of original Feynman dataset distribution (full analysis in Fig. 8, App.A.1). This allows us to focus on the semantic aspects of discovery—specifically the interplay between reasoning and memorization of the mathematical forms—rather than conflating performance with the ability to handle syntactically complex and lengthy hypotheses. We also exclude transformed problems that LLM can solve through direct sampling without requiring access to data.", + "bbox": [ + 84, + 459, + 475, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This process yields 111 total transformed equations derived from the 100 original Feynman problems. Each transformed equation shares the same scientific context, problem description, and variables as its original counterpart but presents a less common mathematical form to be discovered. The goal of LSR-Transform is not to discover new equations but to evaluate whether LLM-based systems can validate discoveries from non-trivial, data-driven transformations of known", + "bbox": [ + 84, + 782, + 475, + 905 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "equations. To support scientific knowledge-guided discovery, each task in LSR-Transform is supplemented with a natural language description of the scientific problem and dataset, including variable names and their meanings. 
These descriptions are absent in the original Feynman benchmark but they are needed for LLM-based scientific equation discovery methods to provide scientific context in prompts for knowledge-guided equation discovery by LLMs.", + "bbox": [ + 496, + 459, + 888, + 580 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2. LSR-Synth", + "text_level": 1, + "bbox": [ + 496, + 595, + 609, + 611 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This category is designed to assess whether LLMs can discover equations that incorporate new synthetic terms alongside known terms, requiring scientific as well as data-driven reasoning rather than reliance on memorization. The LSR-Synth dataset is motivated by the approach introduced in (Shojae et al., 2024b) for the handful of manually designed problems and systematically expands it into a comprehensive set of benchmark problems across diverse scientific domains. By combining known terms with synthetic, novel terms, LLMs are challenged to demonstrate discovery capabilities in unobserved contexts, yet leverage their knowledge in the process. The LSR-Synth dataset spans four scientific domains: chemistry, biology, physics, and material science, focusing on key scientific problems, including reaction kinetics in chemistry, population growth in biology, damped harmonic oscillators in physics, and stress-strain relationships in material science (examples in App. 
A.2).", + "bbox": [ + 495, + 619, + 888, + 877 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The data generation process for LSR-Synth involves multi", + "bbox": [ + 496, + 883, + 888, + 900 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ple steps , as illustrated in Fig. 3(b), to ensure the creation of high-quality, challenging benchmark problems: 1) Select Scientific Problem: We select problems from different scientific domains, such as reaction kinetics in chemistry or population dynamics in biology. 2) Known Term Generation: Given the problem description, we prompt an LLM (GPT-4o) to generate a list of common and well-known mathematical terms that typically appear in the underlying models. 3) Synthetic Term Generation: Similarly, we prompt the LLM to generate a list of diverse novel synthetic terms for a given scientific problem, along with descriptions of the problem and variables. For example, in chemistry reaction kinetics, known terms for reaction rate $(dA / dt)$ based on concentration $(A)$ and time $(t)$ might include first-order $(-kA)$ and second-order kinetics $(-kA^2)$ or the exponential decay term $-k\\exp (-k_st)$ , while synthetic terms could represent non-linear high-order saturation, e.g., $kA^2 /(1 + \\beta A^4)$ , or non-linear quantum tunneling effects, e.g., $kA\\exp (-\\frac{\\gamma}{t}) / t^2$ . 4) Solvability Check: After sampling from the generated known and synthetic terms and combining them into a complete mathematical expression, we verify the solvability of these expressions using numerical solvers such as solve_ivp in Python. 
This step ensures that the expressions are feasible, providing a basis for generating datapoints. 5) Novelty Check: In the context of each scientific problem and the complete expression, we evaluate the novelty of the new generated task using LLM (GPT-4o) as a novelty evaluator. This step is to verify that the synthetic terms are novel in the provided context and require data-driven reasoning rather than relying on established knowledge to be discovered. 6) Databe point Generation: For expressions that pass the solvability and novelty checks, we generate datapoints using numerical solvers based on the specified initial conditions and parameters. These datapoints are used to create the final task datasets. 7) Expert Validation: Finally, the filtered expressions, along with visualizations of their generated datapoints, are cross-checked by two subject matter experts to validate their plausibility. After these filtering steps, we finalize a candidate list of 128 problems across the four domains (36: chemistry; 24: biology; 43: physics; and 25: material science). More detailed analysis of LLM-SRBench datasets are provided in App. A.", + "bbox": [ + 84, + 85, + 475, + 736 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3. Evaluation", + "text_level": 1, + "bbox": [ + 86, + 750, + 194, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluating LLM-based scientific equation discovery methods introduces unique challenges due to the open-ended nature of the task and diverse symbolic representation of hypotheses. A discovered equation can be assessed from two perspectives: (a) data fidelity, which measures how well the equation fits the observed and out-of-domain (OOD) data, and (b) symbolic accuracy, which evaluates the alignment with ground-truth symbolic equation hypotheses. 
Both", + "bbox": [ + 84, + 773, + 475, + 893 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "perspectives are critical, as equations may exhibit similar symbolic forms but differ numerically, or vice versa.", + "bbox": [ + 496, + 84, + 885, + 114 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data Fidelity. We evaluate data-driven fidelity using two known metrics in equation discovery: (1) Accuracy to tolerance $\\tau$ ( $\\mathrm{Acc}_{\\tau}$ ) (Kamienny et al., 2022; Biggio et al., 2021), and Normalized Mean Squared Error (NMSE). These metrics are computed on both in-domain test data and OOD data (when available) to assess generalization capacity, a crucial requirement for scientific equations.", + "bbox": [ + 496, + 119, + 885, + 227 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {A c c} _ {\\tau} = \\mathbb {1} \\left(\\max _ {1 \\leq i \\leq N _ {\\text {t e s t}}} \\left| \\frac {\\hat {y} _ {i} - y _ {i}}{y _ {i}} \\right| \\leq \\tau\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 241, + 821, + 273 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {N M S E} = \\frac {\\sum_ {i = 1} ^ {N _ {\\mathrm {t e s t}}} (\\hat {y} _ {i} - y _ {i}) ^ {2}}{\\sum_ {i = 1} ^ {N _ {\\mathrm {t e s t}}} (y _ {i} - \\bar {y}) ^ {2}}\n$$\n", + "text_format": "latex", + "bbox": [ + 630, + 277, + 821, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Symbolic Accuracy. We evaluate symbolic accuracy with a model-based evaluation strategy using GPT-4o as an evaluator (prompt in App. B, Fig. 11). This approach addresses the limitations of current symbolic metrics like recovery rate in symbolic regression (La Cava et al., 2016), which are very sensitive to exact symbolic matches and fail to account for mathematical equivalence, particularly in different hypothesis representations (e.g., equation as strings, expression trees, or Python programs). 
Here, GPT-4o evaluates mathematical equivalence by comparing the symbolic form of the predicted hypothesis versus the ground-truth equation after removing parameters and constants. The ability of LLMs to recognize semantic equivalence across different representations makes them particularly well-suited for evaluating LLM-based equation discovery methods, which often operate within a more diverse and open-ended hypothesis space. To validate this metric, two authors also independently evaluated symbolic equivalence on 130 sampled problems, finding $94.6\\%$ agreement between GPT-4o and human evaluators. App. B provides more details on the evaluation metrics.", + "bbox": [ + 496, + 323, + 887, + 638 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3. Experiments", + "text_level": 1, + "bbox": [ + 496, + 659, + 629, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 496, + 685, + 669, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We benchmark state-of-the-art LLM-based scientific equation discovery methods using three LLM backbones: one open-source model (Llama-3.1-8B-Instruct) and two proprietary models (GPT-4o-mini and GPT-3.5-turbo). Each discovery task takes as input the problem description, variables, the corresponding dataset, and an instruction specifying the task. The discovery methods then generate and refine equation hypotheses through LLMs. To ensure fair comparison, we standardize each of the methods to use 1k LLM calls per problem while maintaining their core algorithmic designs and hyperparameter settings. Detailed implementation specifics and prompts of each method are provided in App. C. 
We", + "bbox": [ + 495, + 709, + 885, + 905 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/9f909e31eff9f4ce76eed70ce90bc2bb14486252d0d4d100e27f4576d9575cab.jpg", + "table_caption": [ + "Table 1. Comparison of different LLM-based scientific equation discovery methods on LLM-SRBench. Performance metrics include symbolic accuracy (SA), numeric precision $(\\mathrm{Acc}_{0.1})$ , and normalized mean squared error (NMSE). Bold values indicate best performance within each method, and underlined values show best overall performance across discovery methods." + ], + "table_footnote": [], + "table_body": "
ModelsLSR-TransformLSR-Synth
ChemistryBiologyPhysicsMaterial Science
SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓
Direct Prompting (DataBlind)
Llama-3.1-8B-Instruct3.611.8010.36970.00.00.06440.00.00.54810.00.00.04590.00.00.0826
GPT-3.5-turbo2.101.8010.35530.08.330.00230.04.160.59900.02.270.02740.00.00.0277
GPT-4o-mini7.216.3060.26310.013.880.02210.04.160.46484.549.090.06470.00.00.0484
SGA (Ma et al., 2024)
Llama-3.1-8B-Instruct2.700.9090.35190.08.330.04580.00.00.24160.02.270.15490.012.120.0435
GPT-3.5-turbo0.00.9090.34650.08.330.00710.08.330.12792.274.540.02490.028.100.0019
GPT-4o-mini9.918.110.23210.016.665.46e-44.1612.510.01284.549.090.05110.036.116.02e-4
LaSR (Grayeli et al., 2024)
Llama-3.1-8B-Instruct5.4145.940.00210.027.772.77e-44.1616.662.73e-44.5425.020.00188.2164.227.44e-5
GPT-3.5-turbo12.6147.740.00150.038.891.51e-40.016.662.31e-46.8122.710.001120.6664.093.77e-5
GPT-4o-mini6.3150.450.00112.7738.929.11e-58.3320.831.53e-49.9131.819.94e-428.1272.049.23e-6
LLM-SR (Shojaece et al., 2024b)
Llama-3.1-8B-Instruct30.6338.550.01018.3366.668.01e-625.3058.331.04e-66.9734.091.23e-44.1088.121.15e-7
GPT-3.5-turbo10.8110.810.14490.050.222.87e-50.025.032.33e-50.025.128.84e-412.4282.142.75e-8
GPT-4o-mini31.5339.640.009111.1152.774.12e-616.6629.163.06e-69.9136.367.62e-520.2488.283.21e-9
", + "bbox": [ + 89, + 143, + 885, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "evaluate the following discovery methods:", + "bbox": [ + 84, + 455, + 366, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "LLM-SR (Shojace et al., 2024b), a program search equation discovery method that generates hypotheses of equation skeleton as Python functions with the main idea of combining LLMs' scientific knowledge with multi-island evolutionary search guided by feedback from data.", + "bbox": [ + 84, + 477, + 473, + 553 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "LaSR (Grayeli et al., 2024), a concept learning equation discovery method that finds abstract textual concepts of mathematical relations from successful equation hypotheses with LLMs and uses these concepts to evolve new hypotheses through a hybrid approach of evolutionary search (with PySR (Cranmer, 2023)) and LLM-guided search.", + "bbox": [ + 84, + 559, + 475, + 650 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SGA (Ma et al., 2024), a bilevel optimization equation discovery method that iteratively combines LLMs for discrete hypothesis generation of scientific laws and physical simulations in PyTorch for continuous parameter optimization with respect to data.", + "bbox": [ + 84, + 656, + 475, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Direct Prompting (DataBlind) serves as a baseline for generating hypotheses purely from contextual information without access to data. By not using data-driven reasoning and refinement in the hypothesis generation, this baseline helps to assess LLMs' memorization of the problem.", + "bbox": [ + 84, + 737, + 475, + 814 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2. 
Main Results", + "text_level": 1, + "bbox": [ + 84, + 830, + 212, + 844 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our experimental results (Table 1) reveals several key insights into the strengths and limitations of LLM-based scientific equation discovery methods. Overall, performance", + "bbox": [ + 84, + 853, + 475, + 898 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e6d31a599f2e56fc3e8bcf784ee3f05f71964409e21e6a4b52c86a09aa0d91e6.jpg", + "image_caption": [ + "Figure 4. Performance comparison across equation complexity levels for Feynman and LSR-Transform datasets: (a) symbolic accuracy and (b) numeric precision $(\\mathrm{Acc}_{0.1})$ showing considerable performance gap between these two datasets at same complexity levels (averaged over all method-LLM pairs)." + ], + "image_footnote": [], + "bbox": [ + 500, + 455, + 687, + 602 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/396f55c66c8f2289a25537401b85bf1309aa6f76d7a756db809edb8483380734.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 455, + 885, + 602 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "remains relatively low across both symbolic and numeric metrics, underscoring the fundamental challenges of this task. One key observation is the poor performance of direct prompting method (DataBlind), which only relies on LLMs' knowledge about the problem without access to data for data-driven refinement. This result underscores the necessity of combining LLM reasoning with observational data, as relying solely on prior knowledge proves insufficient for accurate equation discovery across different problems in LLM-SRBench. 
We observe that on LSR-Transform data group, LaSR achieves the highest numerical accuracy, leading in both $\\mathrm{Acc}_{0.1}$ and NMSE, while LLM-SR with GPT", + "bbox": [ + 495, + 724, + 888, + 905 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/894c6bbe436e91e344a48f1b4afa6678514260a01ee7aec8ebbf71e5ed22ed03.jpg", + "image_caption": [ + "Figure 5. Detailed results of in-domain (ID) and out-of-domain (OOD) performance using Normalized Mean Squared Error across various LSR-Synth scientific domains and LLM-based equation discovery methods (with GPT-4o-mini as LLM backbone)." + ], + "image_footnote": [], + "bbox": [ + 109, + 85, + 867, + 272 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4o-mini outperforms other methods in symbolic accuracy $(\\sim 31\\%)$ . This comparative advantage inverts in the LSR-Synth material science problems, where LaSR consistently yields better symbolic accuracy and LLM-SR achieves better numerical precision, suggesting that different equation discovery strategies may be better suited to different problems.", + "bbox": [ + 83, + 342, + 475, + 448 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Another notable observation is the consistent outperformance of models using GPT-4o-mini and Llama-3.1-8B compared to those based on GPT-3.5-turbo. This may be due to improved reasoning architectures or better effectiveness of smaller, less opinionated models in the search and exploration needed for navigating space of possible equations. 
The lower performance on LSR-Synth compared to LSR-Transform tasks also indicates that the ability to find transformed variants of known problems does not necessarily extend to more challenging scenarios involving novel synthetic terms, where systematic data-driven exploration becomes essential.", + "bbox": [ + 83, + 455, + 477, + 636 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3. Analysis", + "text_level": 1, + "bbox": [ + 84, + 647, + 181, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "LSR-Transform vs. Feynman datasets. We analyze the performance gap between Feynman and LSR-Transform datasets across different equation complexity levels, measured by the number of nodes in the corresponding expression tree (La Cava et al., 2021). Fig. 4 shows the aggregated average performance (over all methods and LLM backbones) in terms of both symbolic accuracy (a) and numeric precision (b). It can be observed that even at the same complexity levels, LSR-Transform problems are substantially more challenging for current discovery methods than original Feynman problems. Also, this performance disparity persists even for simpler problems ([0-15] nodes), indicating that the challenging nature of LSR-Transform problems for LLM-based scientific equation discovery methods is not necessarily due to the structural complexity.", + "bbox": [ + 83, + 670, + 477, + 898 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Performance on In-domain vs. OOD. Generalization to unseen data is a fundamental requirement for scientific laws and a critical aspect of equation discovery. A correct mathematical model of observations should not only fit observed data but also extrapolate accurately to out-of-domain (OOD) scenarios. However, current equation discovery benchmarks largely overlook this aspect. In this work, we advocate for explicit OOD assessment in scientific equation discovery by introducing held-out OOD test sets in our benchmark. 
To systematically evaluate generalization beyond observed data, we generate dedicated OOD test sets for synthetic problems in the LSR-Synth category (see App. A for details on data generation). Fig. 5 provides a comparative analysis of ID vs. OOD results. As expected, all discovery methods exhibit higher NMSE in OOD settings, indicating degraded generalization compared to in-domain data. Among the evaluated methods, LLM-SR achieves the lowest NMSE across both ID and OOD settings, while direct prompting performs the worst. Also, we observe some domain-specific variations in generalization performance: the performance gap between ID and OOD is more pronounced in chemistry and biology problems compared to physics and material science, although the complexity of problems are designed to be similar, as shown in Fig. 10. This suggests that different scientific problems may pose distinct challenges for equation discovery methods, highlighting the need for future research to develop more robust approaches for different scientific disciplines.", + "bbox": [ + 495, + 342, + 888, + 765 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "OD generalization and symbolic accuracy. We further analyzed the correlation between our proposed symbolic accuracy metric (Sec. 2.3) and data-driven extrapolation performance in OOD settings (averaged over all LSR-Synth domains). As shown in Fig. 6, symbolic accuracy exhibits a strong positive correlation with numerical precision $(\\mathrm{Acc}_{0.1})$ on OOD data and a corresponding negative correlation with numerical error (NMSE). 
This strong correlation observed", + "bbox": [ + 495, + 772, + 888, + 893 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ca65ea72e3598758183c47b1c9383295ef13fd6f3425058eab5ecd3109f15d85.jpg", + "image_caption": [ + "Figure 6. Correlation between symbolic accuracy and OOD performance across different equation discovery methods and LLM backbones: (a) symbolic accuracy vs. $\\mathrm{Acc}_{0.1}$ showing positive correlation; (b) symbolic accuracy vs. normalized mean squared error showing negative correlation. Results are averaged over all LSR-Synth datasets." + ], + "image_footnote": [], + "bbox": [ + 86, + 82, + 292, + 223 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/829ccf7d250536ce02ae636e646d148b83b71780115b434595a8856f534f2c26.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 82, + 470, + 222 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "between symbolic and OOD performance provides two key insights: First, it establishes OOD evaluation as a powerful approach for assessing the discovery of generalizable equations—an aspect often underutilized in symbolic regression research; second, it validates our LLM-based symbolic evaluation approach through its strong alignment with numeric generalization performance.", + "bbox": [ + 83, + 357, + 473, + 464 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "More detailed experimental results, including both qualitative analyses of discovered equations and quantitative performance comparisons across scientific equation discovery methods and LLMs, are provided in App. 
D.", + "bbox": [ + 83, + 470, + 475, + 531 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4. Related Work", + "text_level": 1, + "bbox": [ + 84, + 544, + 227, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "AI for Scientific Discovery. Recent advancements in AI for science highlight the ability of LLMs to generate scientific hypotheses by leveraging their extensive knowledge and reasoning capabilities (Lu et al., 2024; Ji et al., 2024; Reddy & Shojaee, 2024). LLM agents, when augmented with external tools and scientific simulators, have shown promise in automated scientific data-driven analysis (Majumder et al., 2024a). While recent benchmarks have been developed to evaluate LLMs and agents in hypothesis generation and scientific question answering (Majumder et al., 2024b; Chen et al., 2024), evaluation for equation discovery and symbolic regression—one of the core tasks in scientific discovery—remains yet unexplored.", + "bbox": [ + 83, + 565, + 473, + 762 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Symbolic Regression. Symbolic regression approaches fall into three main categories: search-based methods that explore equation spaces via evolutionary algorithms or reinforcement learning (Schmidt & Lipson, 2009; Cranmer, 2023; Petersen et al., 2021; Sun et al., 2023), learning-based methods leveraging pre-trained Transformers on synthetic data (Biggio et al., 2021; Kamienny et al., 2022), and hybrid approaches that guide search using neural priors (Landajuela et al., 2022; Shojaee et al., 2024a; Mundhenk et al., 2021;", + "bbox": [ + 83, + 768, + 475, + 905 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Meidani et al., 2023). 
While these methods have advanced the field of automated symbolic function discovery from data, they mostly lack mechanisms to incorporate scientific domain knowledge into the discovery process.", + "bbox": [ + 495, + 84, + 885, + 147 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "LLMs for Equation Discovery. Recent work has leveraged LLM-based symbolic regression to enhance scientific equation discovery through various approaches leveraging LLMs' knowledge. LLM-SR (Shojaee et al., 2024b) utilizes LLMs' embedded scientific knowledge to generate initial equation hypotheses in the form of Python programming functions, which are then refined through adaptive mutation and crossover operations with LLMs as evolutionary optimizers. In-Context Symbolic Regression (ICSR) (Merler et al., 2024) employs an iterative few-shot learning paradigm over expression candidates, using previously tested successful expressions along with their fitness scores to guide the generation of improved candidates. LaSR (Grayeli et al., 2024) alternates between hypothesis evolution, concept abstraction, and concept iteration phases to build a learned library of scientific concepts for mathematical relations needed to find the equation for a given data. The learned concepts are then used with pure evolutionary search methods (Cranmer, 2023) like PySR (Cranmer, 2023) as well as LLM-guided search to guide the equation hypothesis evolution. Scientific Generative Agent (SGA) (Ma et al., 2024) also implements a bilevel optimization framework for equation discovery where LLMs iteratively propose discrete hypotheses for scientific laws while physical simulations in PyTorch provide experimental validation and data-driven parameter optimization.", + "bbox": [ + 495, + 152, + 887, + 546 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Symbolic Regression Benchmarks. 
Symbolic regression benchmarks can be broadly categorized into scientific discovery-oriented and general-purpose mathematical discovery collections. The scientific equation discovery benchmarks are primarily represented by the SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022) benchmarks. SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu & Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is presented in SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for datapoints. The second category includes benchmarks like the Nguyen collection (Uy et al., 2011) and SRBench's black-box regression problems (La Cava et al., 2016) which include datasets without scientific contexts. However, these existing benchmarks are not well-suited for evaluating LLM-based equation discovery methods. These general-purpose benchmarks focus on the data-driven discovery of abstract mathematical functions without scientific context, while the former scientific benchmarks consist of well-known equations likely memorized by LLMs, enabling success through recitation rather than", + "bbox": [ + 495, + 551, + 888, + 901 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "scientific reasoning and discovery. 
Our work extends this line of research by focusing on scientific equation discovery with LLMs, designing the first comprehensive benchmark to assess discovery capabilities of LLM-based scientific equation discovery methods beyond memorization.", + "bbox": [ + 84, + 84, + 473, + 161 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 84, + 179, + 205, + 195 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We introduce LLM-SRBench, the first comprehensive benchmark for LLM-driven scientific equation discovery, encompassing 239 tasks across two distinct categories: LSR-Transform (111 problems derived from transformations of established physical models) and LSR-Synth (128 novel synthetic problems spanning four scientific disciplines). Our benchmark provides a standardized and multi-faceted evaluation protocol for assessing scientific equation discovery with LLMs, accommodating diverse hypothesis representations, including expression strings and programs. Extensive experiments with state-of-the-art discovery methods and various LLM backbones on LLM-SRBench show a peak performance of only $31\\%$ , highlighting the significant challenges and open research opportunities in this domain. 
We envision that LLM-SRBench benchmark datasets and its evaluation protocol could serve as a foundation for future research, driving progress in automated equation discovery and advancing our understanding of LLMs in symbolic reasoning needed in scientific discovery.", + "bbox": [ + 86, + 205, + 475, + 492 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Impact Statement", + "text_level": 1, + "bbox": [ + 86, + 512, + 240, + 529 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The development and future adoption of LLM-SRBench as a benchmark for evaluating LLM-based scientific equation discovery has the potential to significantly impact the field of artificial intelligence for science and scientific discovery. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.", + "bbox": [ + 84, + 537, + 475, + 628 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 86, + 645, + 218, + 660 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This research was partially supported by the U.S. National Science Foundation (NSF) under Grant No. 2416728.", + "bbox": [ + 84, + 667, + 473, + 699 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 86, + 718, + 181, + 734 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Biggio, L., Bendinelli, T., Neitz, A., Lucchi, A., and Paras-candolo, G. Neural symbolic regression that scales. In Meila, M. and Zhang, T. (eds.), Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pp. 936-945. PMLR, 18-24 Jul 2021.", + "Chen, Z., Chen, S., Ning, Y., Zhang, Q., Wang, B., Yu, B., Li, Y., Liao, Z., Wei, C., Lu, Z., et al. 
Scienceagentbench: Toward rigorous assessment of language" + ], + "bbox": [ + 86, + 742, + 475, + 906 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "agents for data-driven scientific discovery. arXiv preprint arXiv:2410.05080, 2024.", + "Cranmer, M. Interpretable machine learning for science with pysr and symbolicregression. jl. arXiv preprint arXiv:2305.01582, 2023.", + "Du, M., Chen, Y., Wang, Z., Nie, L., and Zhang, D. Large language models for automatic equation discovery of nonlinear dynamics. Physics of Fluids, 36(9), 2024.", + "Grayeli, A., Sehgal, A., Costilla-Reyes, O., Cranmer, M., and Chaudhuri, S. Symbolic regression with a learned concept library. arXiv preprint arXiv:2409.09359, 2024.", + "Ji, H., Wang, Q., Downey, D., and Hope, T. Scimon: Scientific inspiration machines optimized for novelty. In ACL Anthology: Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 279-299. University of Illinois Urbana-Champaign/CABBI, 2024.", + "Kamienny, P.-A., d'Ascoli, S., Lample, G., and Charton, F. End-to-end symbolic regression with transformers. In Advances in Neural Information Processing Systems, 2022.", + "La Cava, W., Danai, K., and Spector, L. Inference of compact nonlinear dynamic models by epigenetic local search. Engineering Applications of Artificial Intelligence, 55:292-306, 2016. ISSN 0952-1976. doi: https://doi.org/10.1016/j.engappai.2016.07.004. URL https://www.sciencedirect.com/science/article/pii/S0952197616301294.", + "La Cava, W., Orzechowski, P., Burlacu, B., de Franca, F., Virgolin, M., Jin, Y., Kommenda, M., and Moore, J. Contemporary symbolic regression methods and their relative performance. In Vanschoren, J. and Yeung, S. (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021.", + "Landajuela, M., Lee, C., Yang, J., Glatt, R., Santiago, C. P., Aravena, I., Mundhenk, T. 
N., Mulcahy, G., and Petersen, B. K. A unified framework for deep symbolic regression. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K. (eds.), Advances in Neural Information Processing Systems, 2022.", + "Langley, P. Data-driven discovery of physical laws. Cognitive Science, 5(1):31-54, 1981.", + "Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J., and Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292, 2024." + ], + "bbox": [ + 500, + 84, + 885, + 904 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 71 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ma, P., Wang, T.-H., Guo, M., Sun, Z., Tenenbaum, J. B., Rus, D., Gan, C., and Matusik, W. LLM and simulation as bilevel optimizers: A new paradigm to advance physical scientific discovery. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=hz8cFsdz7P.", + "Majumder, B. P., Surana, H., Agarwal, D., Hazra, S., Sabharwal, A., and Clark, P. Data-driven discovery with large generative models. arXiv preprint arXiv:2402.13610, 2024a.", + "Majumder, B. P., Surana, H., Agarwal, D., Mishra, B. D., Meena, A., Prakhar, A., Vora, T., Khot, T., Sabharwal, A., and Clark, P. Discoverybench: Towards data-driven discovery with large language models. arXiv preprint arXiv:2407.01725, 2024b.", + "Matsubara, Y., Chiba, N., Igarashi, R., Tatsunori, T., and Ushiku, Y. Rethinking symbolic regression datasets and benchmarks for scientific discovery. arXiv preprint arXiv:2206.10540, 2022.", + "Meidani, K., Shojaee, P., Reddy, C. K., and Farimani, A. B. Snip: Bridging mathematical symbolic and numeric realms with unified pre-training. 
In The Twelfth International Conference on Learning Representations, 2023.", + "Merler, M., Haitsiukevich, K., Dainese, N., and Marttinen, P. In-context symbolic regression: Leveraging large language models for function discovery. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop), pp. 589-606, 2024.", + "Mirzadeh, I., Alizadeh, K., Shahrokhi, H., Tuzel, O., Bengio, S., and Farajtabar, M. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024.", + "Mundhenk, T. N., Landajuela, M., Glatt, R., Santiago, C. P., faissol, D., and Petersen, B. K. Symbolic regression via deep reinforcement learning enhanced genetic programming seeding. In Beygelzimer, A., Dauphin, Y., Liang, P., and Vaughan, J. W. (eds.), Advances in Neural Information Processing Systems, 2021.", + "Petersen, B. K., Larma, M. L., Mundhenk, T. N., Santiago, C. P., Kim, S. K., and Kim, J. T. Deep symbolic regression: Recovering mathematical expressions from data via risk-seeking policy gradients. In International Conference on Learning Representations, 2021.", + "Reddy, C. K. and Shojaee, P. Towards scientific discovery with generative ai: Progress, opportunities, and challenges. arXiv preprint arXiv:2412.11427, 2024." + ], + "bbox": [ + 86, + 84, + 478, + 905 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Schmidt, M. and Lipson, H. Distilling free-form natural laws from experimental data. Science Advance, 324 (5923):81-85, 2009. ISSN 0036-8075. doi: 10.1126/science.1165893.", + "Shojae, P., Meidani, K., Barati Farimani, A., and Reddy, C. Transformer-based planning for symbolic regression. Advances in Neural Information Processing Systems, 36, 2024a.", + "Shojaee, P., Meidani, K., Gupta, S., Farimani, A. B., and Reddy, C. K. Llm-sr: Scientific equation discovery via programming with large language models. 
arXiv preprint arXiv:2404.18400, 2024b.", + "Strogatz, S. H. Nonlinear dynamics and chaos with student solutions manual: With applications to physics, biology, chemistry, and engineering. CRC press, 2018.", + "Sun, F., Liu, Y., Wang, J.-X., and Sun, H. Symbolic physics learner: Discovering governing equations via monte carlo tree search. In The Eleventh International Conference on Learning Representations, 2023.", + "Udrescu, S.-M. and Tegmark, M. Ai feynman: A physics-inspired method for symbolic regression. Science Advances, 6(16):eaay2631, 2020. doi: 10.1126/sciadv.aay2631.", + "Uy, N. Q., Hoai, N. X., O'Neill, M., McKay, R. I., and Galván-López, E. Semantically-based crossover in genetic programming: application to real-valued symbolic regression. Genetic Programming and Evolvable Machines, 12:91-119, 2011.", + "Virgolin, M. and Pissis, S. P. Symbolic regression is NP-hard. Transactions on Machine Learning Research, 2022. ISSN 2835-8856.", + "Wu, Z., Qiu, L., Ross, A., Akyurek, E., Chen, B., Wang, B., Kim, N., Andreas, J., and Kim, Y. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. arXiv preprint arXiv:2307.02477, 2023.", + "Xie, C., Huang, Y., Zhang, C., Yu, D., Chen, X., Lin, B. Y., Li, B., Ghazi, B., and Kumar, R. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024.", + "Zhang, Y., Zheng, K., Liu, F., Zhang, Q., and Wang, Z. Autoturb: Using large language models for automatic algebraic model discovery of turbulence closure. arXiv preprint arXiv:2410.10657, 2024." 
+ ], + "bbox": [ + 500, + 84, + 885, + 849 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 71 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 86, + 83, + 171, + 99 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A. Dataset Details", + "text_level": 1, + "bbox": [ + 86, + 109, + 240, + 125 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A.1. LSR-Transform", + "text_level": 1, + "bbox": [ + 86, + 135, + 235, + 150 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The LSR-Transform is the first category of datasets in LLM-SRBench, designed to evaluate the ability of LLM-based scientific equation discovery methods in less common mathematical forms. This dataset challenges LLM-based discovery methods to avoid reliance on memorization of well-known representations and instead reason through unfamiliar instantiations of familiar problems. This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024). By transforming existing benchmark problems into alternative mathematical representations, LSR-Transform provides a rigorous testbed to evaluate how well LLM-based discovery methods perform in both (1) semantic scientific reasoning, which draws on LLMs' built-in scientific knowledge, and (2) data-driven reasoning, which utilizes experimental feedback for equation discovery. LSR-Transform builds on the Feynman benchmark (Udrescu & Tegmark, 2020), a widely used standard benchmark in scientific equation discovery and symbolic regression. 
The Feynman benchmark consists of 100 physics equations from Feynman Lecture Series $^{1}$ , representing fundamental laws in physics. While the Feynman benchmark has been instrumental in evaluating symbolic regression methods, it primarily tests the ability to recover equations in their standard, well-known forms which are mostly memorized by LLMs. However, real-world scientific equation discovery often involves reasoning about unknown equations based on domain expertise and knowledge from literature as well as empirical data observations. To address this gap, LSR-Transform transforms the original Feynman equations into less common alternative mathematical forms of the same physical problem by switching input-output variables and symbolically solving for the new target variables.", + "bbox": [ + 84, + 159, + 888, + 417 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/8ba728d78f2919727c08b3690d02bb7aec14be00772e7730233d78780cba6800.jpg", + "image_caption": [ + "Figure 7. Examples of how LLM-SRBench (LSR-Transform) problems can be obtained from original Feynman benchmark problems." + ], + "image_footnote": [], + "bbox": [ + 173, + 430, + 795, + 753 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Figure 7 demonstrates the equation transformation process, showing examples of the original Feynman problems (along with their scientific descriptions) and their potential transformed versions. These examples show the dataset's design for altering the mathematical representation of the same problem by analytically solving the equations with respect to different input variables. 
For instance, the original harmonic oscillator energy equation $E = \\frac{1}{4} m(\\omega^2 + \\omega_0^2)x^2$ is transformed into symbolic representation of $m = \\frac{4E}{(\\omega^2 + \\omega_0^2)x^2}$ and $\\omega = \\sqrt{\\frac{4E}{mx^2} - \\omega_0^2}$ where the target variable is switched from energy $(E)$", + "bbox": [ + 84, + 796, + 888, + 883 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 10 + }, + { + "type": "page_footnote", + "text": " float: \"\"Evaluate the equation on data observations.\"\"\"", + "guess_lang": "python", + "bbox": [ + 125, + 815, + 441, + 896 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/76b6590aa51f2645dd5819d5bfe3ca1e5f13e79490781dd4e4338503b9cb123c.jpg", + "table_caption": [ + "Table 2. Implementation details of LLM-based scientific equation discovery methods." + ], + "table_footnote": [], + "table_body": "
MethodParameters
Direct Prompting (DataBlind)Temperature τ = 0.85 equation program hypotheses sampled from LLM for initial promptNo access to data for data-driven refinementTime limit T = 30s per program hypothesis execution,BFGS optimizer from Scipy for parameter optimization of equation skeletons
SGA (Ma et al., 2024)PyTorch-based implementation of model and torch nn. Module classMean square error loss for data-driven feedback in agentic searchAdam optimizer in PyTorch for differential parameter optimization of equation skeletons
LaSR (Grayeli et al., 2024)Iterations = 25Cycles per iteration = 550Populations = 10Population size = 33Maximum size = 30Operators: +, *, -, /, ∧, exp, log, sqrt, sin, cos, tan, coshLLM weights: llm_mutate =0.005, llm_crossover =0.005, llm_gen_random =0.005Top-K = 20 concepts from libraryDefault configuration of PySR for parameter optimization
LLM-SR (Shojaee et al., 2024b)Temperature τ = 0.8Batch size b = 4 equation programs per prompte = 4 parallel evaluatorsTime limit T = 30s per program hypothesis,Memory limit M = 2GBm = 10 islands for population diversity through searchk = 2 in-context examples per promptMaximum 10 parameters per equation skeletonBFGS optimizer from Scipy for parameter optimization of equation skeletons
", + "bbox": [ + 173, + 107, + 803, + 441 + ], + "page_idx": 16 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "# Load data observations \ninputs, outputs = data['inputs'], data['outputs'] \nX = inputs \n# Optimize parameters based on data \nfrom scipy.optimize import minimize \ndef loss.params): \n y_pred = equation(*X, params) \n return np.mean((y_pred - outputs) ** 2) \nloss_partial = lambda params: loss.params) \nresult = minimize(loss_partial, [1.0]*MAX_NPARAMS, method='BFGS') \n# Return evaluation score \noptimized.params = result.x \nloss = result(fun \nif np.isnan(loss) or np.isinf(loss): \n return None \nelse: \n return -loss", + "guess_lang": "python", + "bbox": [ + 143, + 469, + 527, + 654 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "3. Equation example specification as Python programming function.", + "text_level": 1, + "bbox": [ + 86, + 671, + 535, + 686 + ], + "page_idx": 16 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def equation_v0(\\(INPUT VAR[0], ..., \\)INPUT VAR[N], params):\n ''' Mathematical function for {$OUTPUT VAR_DESC}\nArgs:\n $INPUT VAR[0]: A numpy array representing observations of {$INPUT VAR_DESC[0]}.\n ...\n $INPUT VAR[N]: A numpy array representing observations of {$INPUT VAR_DESC[N]}.\nparams: Array of numeric constants or parameters to be optimized\nReturn: A numpy array representing {$OUTPUT VAR_DESC} as the result of applying the mathematical function to the inputs.\n'''# Equation example 1 logic as function body\n...\ndef equation_v1(\\)INPUT VAR[0], ..., \\)INPUT VAR[N], params):\n # Equation example 2\n...\n## Function to be completed", + "guess_lang": "python", + "bbox": [ + 125, + 700, + 815, + 904 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 71 + ], + "page_idx": 16 + }, + { + "type": 
"page_number", + "text": "17", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 16 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def equation( $INPUT VAR[0], ...,$ INPUT VAR[N], params):\n ''' Improvement version of equation_v0 and equation_v1'''", + "guess_lang": "python", + "bbox": [ + 125, + 88, + 486, + 108 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.2.2. LASR", + "text_level": 1, + "bbox": [ + 84, + 131, + 181, + 143 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We use the default prompts from LaSR's (Grayeli et al., 2024) public code repository (https://github.com/trishullah/LibraryAugmentedSymbolicRegression.jl), which includes:", + "bbox": [ + 83, + 155, + 885, + 185 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The LLMINIT prompt, which is used in an LLM-augmented initialization operation.", + "2. LLMMUTATION prompt is used to mutate an expression based on a set of concepts.", + "3. LLMCROSSOVER prompt is used to construct a new expression from the crossover of two sampled expressions based on a set of concepts.", + "4. LLM Concept Abstraction prompt in CONCEPTABSTRACTION function, which extracts a natural language concept from current trends of hypotheses at each iteration.", + "5. LLM Concept Evolution prompt in CONCEPTEVOLUTION function, which creates a new concept that follows a set of ideas in the current library." + ], + "bbox": [ + 96, + 204, + 885, + 364 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In the following, we provide examples of these prompts.", + "bbox": [ + 84, + 383, + 455, + 401 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1. 
LLMINIT prompt.", + "text_level": 1, + "bbox": [ + 86, + 407, + 228, + 422 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " \nYou are a helpful assistant that proposes a mathematical expression by following three provided suggestions. An expression must consist of the following variables: {{variables}}. All constants will be represented with the symbol C. Each expression will only use these operators: {{operators}}. \n \nSuggestion 1: {{assume1}} \nSuggestion 2: {{assume2}} \nSuggestion 3: {{assume3}} \nPropose {{N}} expressions that would be appropriate given the suggestions. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... \"expr{N}\"] \n]", + "guess_lang": "handlebars", + "bbox": [ + 125, + 428, + 828, + 594 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "2. LLMMUTATION prompt.", + "text_level": 1, + "bbox": [ + 84, + 613, + 274, + 628 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " \nYou are a helpful assistant that mutates a mathematical expression by following a few provided suggestions. You will be given three suggestions and a single reference expression to mutate. \nAn expression must consist of the following variables: $\\{\\{variables\\}\\}$ . All constants will be represented with the symbol C. Each expression will only use these operators: $\\{\\{\\mathrm{operators}\\}\\}$ \n \nSuggestion 1:{\\{assume1\\}} \nSuggestion 2:{\\{assume2\\}} \nSuggestion 3:{\\{assume3\\}} \nReference Expression:{\\{expr\\}} \nPropose $\\{\\{\\mathbf{N}\\}\\}$ expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... 
\"expr.{N}\"] \n]", + "guess_lang": "txt", + "bbox": [ + 125, + 636, + 828, + 819 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3. LLMCROSSOVER prompt.", + "text_level": 1, + "bbox": [ + 86, + 838, + 284, + 853 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " \nYou are a helpful assistant that recombines two mathematical expressions by following a few provided suggestions. You will be given three suggestions and two reference expressions to recombine. \nAn expression must consist of the following variables: $\\{\\{variables\\}\\}$ . All constants will be represented with the symbol C. Each expression will only use these operators: $\\{\\{\\mathrm{operators}\\}\\}$", + "guess_lang": "latex", + "bbox": [ + 125, + 859, + 828, + 904 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " \nSuggestion 1: {{assume1}} \nSuggestion 2: {{assume2}} \nSuggestion 3: {{assume3}} \nReference Expression 1: {{expr1}} \nReference Expression 2: {{expr2}} \nPropose {{N}} expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... \"expr.{N}\" \n]", + "guess_lang": "handlebars", + "bbox": [ + 125, + 95, + 823, + 238 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "4. 
LLM Concept Abstraction prompt.", + "text_level": 1, + "bbox": [ + 84, + 257, + 334, + 273 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " \nYou are a helpful assistant that hypothesizes about the underlying assumptions that generated a list of good and bad mathematical expressions in detailed ways. My ultimate goal is to discover what assumptions generated the observed good mathematical expressions and excludes the bad mathematical expressions. Focus more on the good expressions, their mathematical structure, and any relation to physical concepts. Note that capital C represents an arbitrary constant", + "guess_lang": "txt", + "bbox": [ + 125, + 280, + 828, + 327 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " \nGood Expression 1: {gexpr1} \nGood Expression 2: {gexpr2} \nGood Expression 3: {gexpr3} \nGood Expression 4: {gexpr4} \nGood Expression 5: {gexpr5} \nBad Expression 1: {bexpr1} \nBad Expression 2: {bexpr2} \nBad Expression 3: {bexpr3} \nBad Expression 4: {bexpr4} \nBad Expression 5: {bexpr5} \nPropose $\\{\\{N\\}\\}$ hypotheses that would be appropriate given the expressions. Provide short commentary for each of your decisions. Do not talk about topics related to the simplicity or complexity of the expressions. I want ideas that are unique and interesting enough to amaze the world's best mathematicians. End with a JSON list that enumerates the proposed hypotheses following this format: \n``'json \n[\"hyp1\", \"hyp2\", ... \"hyp.{N}]'' \n]", + "guess_lang": "txt", + "bbox": [ + 125, + 333, + 818, + 542 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "5. LLM Concept Evolution prompt.", + "text_level": 1, + "bbox": [ + 84, + 563, + 323, + 578 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " You are an insightful assistant skilled in logical reasoning and deduction. 
Your task is to analyze a set of ideas and infer nontrivial conclusions that logically follow from them. The ultimate goal is to uncover underlying principles or properties of the hidden expressions. Focus on providing logical conclusions that are unique, interesting, and profound.", + "guess_lang": "txt", + "bbox": [ + 125, + 587, + 834, + 625 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " \nIdea 1:{ideal} \nIdea 2:{idea2} \nIdea 3:{idea3} \nIdea 4:{idea4} \nIdea 5:{idea5} \nBased on these ideas, deduce $\\{\\{N\\}\\}$ logical conclusions or hypotheses that directly follow from them. Provide a brief explanation for each conclusion, highlighting the logical connections between the ideas. Avoid discussing topics related to the simplicity or complexity of the expressions. Conclude with a JSON list that enumerates the proposed conclusions in the following format: \n``'json \n[\"Conclusion 1\", \"Conclusion 2\", ... \"Conclusion {{N}}]\" \n]", + "guess_lang": "txt", + "bbox": [ + 125, + 630, + 834, + 787 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C.2.3. 
SGA", + "text_level": 1, + "bbox": [ + 86, + 814, + 174, + 828 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The following prompts are used in our implementation of SGA (Ma et al., 2024) for scientific equation discovery tasks, following the original implementation SGA's public code repository (https://github.com/PingchuanMa/SGA), which includes:", + "bbox": [ + 84, + 838, + 888, + 881 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "System prompt for task.", + "bbox": [ + 84, + 891, + 246, + 905 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Code formatting prompt for scientific equation discovery task." + ], + "code_body": "You are an intelligent AI assistant for coding and scientific equation discovery. \nYou are tasked with discovering mathematical function structures for scientific systems. \nFollow the user's requirements carefully and make sure you understand them. \nKeep your answers short and to the point. \nDo not provide any information that is not requested.. \nAlways document your code as comments to explain the reason behind them. \nUse Markdown to format your solution. \nYou are very familiar with Python and PyTorch. \nDo not use any external libraries other than the libraries used in the examples.", + "guess_lang": "txt", + "bbox": [ + 125, + 95, + 643, + 178 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "## PyTorch Tips\n1. When working with tensors, always use PyTorch's operators (such as 'torch.exp', 'torch.cos', 'torch.sqrt', ...) to ensure compatibility and optimal performance.\n2. 
In PyTorch, operator input arguments must be tensors, not floats.\n## Code Requirements\n1. The only library allowed is PyTorch. Follow the format provided by the user examples.\n2. Annotate the size of the tensor as comment after each tensor operation. For example, # (B, 3, 3).\n3. Separate the code into parameters that can be tuned with differentiable optimization and the symbolic expression represented by PyTorch code. Define them respectively in the\n5. The proposed code must strictly follow the structure and function signatures below:\n``'python\nimport torch\nimport torch(nn as nn)\nclass SymbolicEquation(nn.Module):\n def __init__(self, {PARAM_INPUTS}):\n Define trainable continuous parameters for differentiable optimization.\n Tentatively initialize the parameters with the default values in args.\n Args:\n {PARAM_DESCRIPTION}\n super().__init__()\n {PARAM_INIT}\n def forward(self, {INPUT_variables}) -> torch.Tensor:\n {FORWARD_FUNCTIONDescriptions}", + "guess_lang": "python", + "bbox": [ + 125, + 224, + 821, + 536 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "1. Analyze step-by-step what the potential problem is in the previous iterations based on the feedback. Think about why the results from previous iterations mismatched with the ground truth. Do not give advice about how to optimize. Focus on the formulation of the scientific equation. Start this section with \"#Analysis\". Analyze all iterations individually, and start the subsection for each iteration with \"#Iteration N\", where N stands for the index. Remember to analyze every iteration in the history.", + "guess_lang": "txt", + "bbox": [ + 125, + 544, + 830, + 589 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Context prompt for each scientific problem." + ], + "code_body": "2. Think step-by-step what you need to do in this iteration. Think about what is needed to improve performance. 
If the analysis suggests specific functional forms or constraints, think about how these will be incorporated into the symbolic equation. Think about how to separate your algorithm into a continuous parameter part and a symbolic expression model part. Describe your plan in pseudo-code, written out in great detail. Remember to update the default values of the trainable parameters based on previous optimizations. Start this section with \"# Step-by-Step Plan\".", + "guess_lang": "txt", + "bbox": [ + 124, + 597, + 834, + 642 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "3. Output the code in a single code block ''``python ... ''`` with detailed comments in the code block. Do not add any trailing comments before or after the code block. Start this section with \"# Code\".", + "guess_lang": "txt", + "bbox": [ + 124, + 648, + 823, + 667 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "# # # Context", + "bbox": [ + 125, + 715, + 194, + 724 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The objective is to construct a mathematical expression that accurately maps input variables to a target output based on a provided dataset. The task involves filling in a code block to define a symbolic expression or model that minimizes the difference between predicted and ground-truth outputs. The code block defines a class with two functions: one for parameters within the expression and another for generating or modifying the symbolic structure of the expression. 
Feedback is provided in the form of metrics measuring the error between the model's predictions and the ground-truth values, as well as guidance on structural improvements to the symbolic expression.", + "bbox": [ + 124, + 734, + 834, + 787 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The expression represents $\\{$ OUTPUT VAR DESC\\}, given data on $\\{$ INPUTS DESC\\}.", + "bbox": [ + 125, + 794, + 568, + 806 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D. Additional Results and Analysis", + "text_level": 1, + "bbox": [ + 84, + 834, + 382, + 852 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Detailed Numeric Accuracy Analysis. While Table 1 presents median Normalized Mean Squared Error for each method-LLM combination across LLM-SRBench datasets, Figure 12 provides a more comprehensive view of error distributions across all samples. These box plots illustrate performance variations across LLM-SRBench datasets from two perspectives:", + "bbox": [ + 83, + 859, + 888, + 906 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 475, + 922, + 496, + 934 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "comparing different equation discovery methods with GPT-4o-mini as the LLM backbone, and examining different LLM backbones when using LLM-SR method. The substantial variance in NMSE performance across samples reflects the diverse complexity inherent in our benchmark—stemming from both the varying mathematical transformations in LSR-Transform and the different combinations of known and synthetic terms in LSR-Synth datasets. 
Notably, the relative difficulty of datasets varies across methods and LLM backbones, suggesting that different methods and LLMs possess distinct capabilities in terms of leveraging domain knowledge, reasoning, and generating novel hypotheses.", + "bbox": [ + 83, + 84, + 887, + 176 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/f5679f3c4d121b4ea5c20faf879d882e4987e7cc96f2c511c1b25316fcd262e4.jpg", + "image_caption": [ + "Figure 12. Normalized Mean Squared Error (NMSE) of discovered equations in various domains of LLM-SRBench with respect to (left) different equation discovery methods using GPT-4omini LLM backbone, and (right) different LLM backbones using LLM-SR method" + ], + "image_footnote": [], + "bbox": [ + 130, + 191, + 488, + 406 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/7287530c601c8f7a06f0551faaeaa9113407b59a179cd6bf6b36c8bedb772eb5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 490, + 191, + 846, + 406 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Symbolic Accuracy and Generalization. For scientific equation discovery methods, both symbolic accuracy and out-of-domain generalization serve as crucial evaluation metrics, reflecting the methods' ability to uncover true governing equations. Figure 13 examines the relationship between these metrics, plotting symbolic accuracy against both OOD accuracy and OOD NMSE across all method-LLM-domain combinations in LSR-Synth. 
The strong correlation observed between symbolic and OOD performance yields two important insights: first, it establishes OOD evaluation as a powerful metric for assessing the discovery of generalizable equations, an approach historically underutilized in symbolic regression; second, it validates our LLM-based symbolic evaluation approach through its strong alignment with numeric generalization performance.", + "bbox": [ + 83, + 488, + 887, + 595 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Qualitative Analysis of Outputs. To provide deeper insights into the behavior of different discovery methods, Figure 14 illustrates their final discovered hypotheses on a biological population growth problem (BPG0) using Llama-3.1-8B as the LLM backbone. Direct Prompting (Figure 14(a)) generates equations that capture basic population dynamics, demonstrating LLMs' ability to propose scientifically plausible structures. SGA's solution (Figure 14(b)) successfully incorporates one of the common population growth terms while exploring additional structural components. LaSR (Figure 14(c)) discovers an equation structure that combines multiple interaction terms, though it differs from established scientific formulations. LLM-SR (Figure 14(d)) combines both standard population dynamics terms and synthetic components in its solution. These examples demonstrate the diverse approaches methods take in balancing scientific interpretability with mathematical expressiveness when discovering equation structures.", + "bbox": [ + 83, + 611, + 887, + 748 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "E. Discussion and Future Directions", + "text_level": 1, + "bbox": [ + 84, + 766, + 393, + 782 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Our findings from LLM-SRBench reveal several key insights that inform the design of future LLMs for scientific discovery applications. 
Scientific equation discovery remains a challenging problem for LLMs, requiring a complex interplay of domain knowledge, search capabilities with data-driven feedback, and mathematical manipulation skills. Our results demonstrate that this problem poses significant challenges for LLM-based discovery frameworks across different model architectures, suggesting that current approaches may be fundamentally limited in their ability to perform genuine scientific discovery.", + "bbox": [ + 83, + 791, + 887, + 883 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "This work questions the current evaluation paradigm for equation discovery in emerging LLM-based techniques. We", + "bbox": [ + 84, + 890, + 885, + 906 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/221b1498b3f8e47e6bf90ed1d149435f76b18381710e3d5b0115980fca2e9cd3.jpg", + "image_caption": [ + "Figure 13. Symbolic Accuracy versus OOD performance over all domains, methods, and backbone LLM pairs." + ], + "image_footnote": [], + "bbox": [ + 127, + 84, + 844, + 329 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "demonstrate that existing benchmarks for this task are susceptible to memorization and inadequate for evaluating these techniques' true scientific discovery capabilities. 
Motivated by these limitations, we designed LLM-SRBench to address the memorization issue through two key innovations: synthetic imaginary scenarios (LSR-Synth category) that are not based on existing scientific knowledge and require data-driven discovery tools for solution, and transformed equations (LSR-Transform category) that convert common forms of scientifically known equations into less familiar formulations. The LSR-Synth category targets genuine innovation in LLM-based discovery techniques by eliminating the possibility of recalling memorized equations, while LSR-Transform problems are difficult to recite from memory and require reasoning over hypothesis generation steps, making them suitable candidates for evaluating recently emerging LLM-based scientific discovery agents. While the mathematical transformations in LSR-Transform are algebraically valid, their scientific meaningfulness varies considerably across contexts. Many transformations correspond to legitimate physics problems from the Feynman Lecture Series collection and represent alternative problem formulations with practical significance. For example, in the Harmonic Oscillator Energy problem, the original formulation $E = \\frac{1}{4} m(\\omega^2 + \\omega_0^2)x^2$ expresses energy as a function of system parameters, while the transformed version $m = \\frac{4E}{(\\omega^2 + \\omega_0^2)x^2}$ determines the mass required for given energy storage. This transformation maintains scientific meaning by addressing the engineering question of what mass is needed to store a specific amount of energy in an oscillating system, and such inversions are common in engineering design problems where system parameters must be determined to achieve desired performance characteristics. 
Similarly, the Electric Potential problem transforms from $V_e = \\frac{1}{4\\pi\\epsilon}\\frac{p_d\\cos(\\theta)}{r^2}$ (potential at a point due to a dipole) to $r = \\sqrt{\\frac{p_d\\cos(\\theta)}{4\\pi\\epsilon V_e}}$ (distance for a given potential), addressing the practical question of determining measurement distances in electrostatic experiments or sensor design.", + "bbox": [ + 83, + 388, + 888, + 688 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "However, not all transformations maintain clear physical interpretability. Some result in equations where the target variable appears in complex functional forms that may not correspond to natural physical questions, such as solving for angular frequency in oscillatory systems yielding expressions involving square roots of differences that lack intuitive physical meaning. Additionally, certain transformations may obscure natural causal relationships—transforming from “force causes acceleration” to “acceleration determines force” maintains mathematical validity but may not reflect underlying physical causality. The LSR-Transform category represents a deliberate balance between mathematical rigor and physical meaningfulness by constraining the complexity of transformed problems to match original problems, focusing on semantic rather than syntactic challenges in scientific equation discovery, while maintaining the original scientific context and variable meanings to ensure that underlying physics remains relevant even when mathematical formulation changes. The varying scientific meaningfulness of transformations reflects broader challenges in automated scientific discovery that warrant future investigation. Automated discovery systems must incorporate mechanisms to evaluate not only data-driven correctness but also scientific plausibility and interpretability of generated hypotheses, as mathematical validity alone is insufficient for meaningful scientific contribution. 
The most effective approach to scientific equation discovery likely involves close collaboration between AI systems, which excel at exploring vast hypothesis spaces, and human domain scientists, who can", + "bbox": [ + 83, + 694, + 888, + 906 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "assess scientific meaningfulness and guide discovery directions based on deep contextual understanding. Future equation discovery methods could improve by incorporating literature retrieval tools to build grounding foundations for scientific context and domain knowledge, helping to prioritize discoveries that are mathematically valid, data-consistent, novel, and scientifically meaningful. The field needs evaluation frameworks that assess not just mathematical correctness but also scientific novelty, interpretability, and practical applicability of discovered equations, moving beyond narrow accuracy metrics toward a more comprehensive understanding of what constitutes valuable scientific discovery in the age of LLMs with their vast scientific knowledge.", + "bbox": [ + 83, + 85, + 887, + 191 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "F. Comparison with Standard (non-LLM) Symbolic Regression Baselines", + "text_level": 1, + "bbox": [ + 84, + 209, + 699, + 227 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To further validate the utility of LLM-SRBench and demonstrate the advantages of LLM-based approaches, we conducted additional experiments comparing LLM-based methods with traditional symbolic regression techniques that do not incorporate domain knowledge. 
We evaluated PySR (Cranmer, 2023), a state-of-the-art symbolic regression method based on genetic programming, on all LLM-SRBench datasets. PySR operates purely on numerical data points without access to the scientific context, variable descriptions, or domain knowledge that LLM-based methods can leverage in discovery process. We used PySR's default configuration with the same computational budget (equivalent number of evaluations) as the LLM-based methods to ensure fair comparison. Table 3 presents the performance comparison between the best-performing LLM-based method from Table 1 and PySR across all LLM-SRBench datasets. The results reveal several key insights about the complementary strengths and limitations of non-LLM versus LLM-based approaches in equation discovery.", + "bbox": [ + 83, + 234, + 887, + 373 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "PySR demonstrates competitive and sometimes even better numerical accuracy $(\\mathrm{Acc}_{0.1})$ across all datasets. However, PySR consistently shows significantly lower symbolic accuracy, particularly struggling with non-physics domains where it achieves $0\\%$ symbolic accuracy on chemistry, biology, and material science datasets. The performance gap is most pronounced in problems that require specialized scientific knowledge. While PySR can fit mathematical patterns in the data, it lacks the scientific intuition to discover equations that align with established physical principles or domain-specific terminology. Interestingly, PySR shows relatively better performance on physics problems, achieving modest symbolic accuracy of $4.54\\%$ on LSR-Synth Physics and $8.11\\%$ on LSR-Transform (which is based on Feynman physics equations). This suggests that physics problems may contain mathematical patterns that are more aligned with the dictionary design in PySR. So they can be discovered better through the data-driven search pipeline designed in PySR. 
These findings strengthen the motivation for LLM-based scientific equation discovery and demonstrate that LLM-SRBench successfully captures challenges in equation discovery that traditional symbolic regression methods cannot adequately address through numerical data-driven optimization alone.", + "bbox": [ + 83, + 378, + 888, + 561 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/fb11f34bbb115c03e1386d66501627f890a455552d3792179b4cbe06566017dc.jpg", + "table_caption": [ + "Table 3. Performance comparison between LLM-based methods and state-of-the-art non-LLM symbolic regression baseline PySR on LLM-SRBench. SA = Symbolic Accuracy (%), Acc0.1 = Accuracy to tolerance 0.1 (%)." + ], + "table_footnote": [], + "table_body": "
Dataset (Metric)LLM-SR (best) SA / Acc0.1LaSR (best) SA / Acc0.1SGA (best) SA / Acc0.1PySR SA / Acc0.1
LSR-Transform31.53 / 39.6412.61 / 50.459.91 / 8.118.11 / 56.76
LSR-Synth Chemistry11.11 / 66.662.77 / 38.920 / 16.660 / 41.67
LSR-Synth Biology25.30 / 58.338.33 / 20.834.16 / 12.510 / 25.0
LSR-Synth Physics9.91 / 36.369.91 / 31.814.54 / 9.094.54 / 29.55
LSR-Synth Material Science20.24 / 88.2828.12 / 72.040 / 36.110 / 68.0
", + "bbox": [ + 168, + 619, + 805, + 742 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/980926fc19c4a5dbe8a321ba11ada09393daa23e399bacfb2e5b2bb0db7cc03b.jpg", + "table_caption": [ + "Table 4: LSR-Synth mathematical equations for each scientific domain." + ], + "table_footnote": [], + "table_body": "
DomainEquation IDEquation
ChemistryCKR1-kA(t)2+kzA(t)2/(βA(t)4+1)
CKR2-kA(t)2-kA(t)+kw cos(log(A(t)+1))
CKR3-kA(t)+kw cos(log(A(t)+1))
", + "bbox": [ + 94, + 791, + 879, + 887 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Continued on next page", + "bbox": [ + 707, + 891, + 867, + 906 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/49bbe1d88c7d1016d1299e0446c3a6af030be9f0f4775174147f6c3751655ae0.jpg", + "table_caption": [ + "Table 4 - continued from previous page" + ], + "table_footnote": [], + "table_body": "
DomainEquation IDEquation
CKR4-kA(t)2-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR5-kA(t)2+kqA(t) log(γt+1)
CKR6-k√(A(t)+kfA(t)0.33
CKR7-kA(t) exp(-ks t)+km sin(√A(t))
CKR8-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR9-kA(t)2-kA(t)+kt sin(log(A(t)+1))
CKR10-k√A(t)+kw cos(log(A(t)+1))
CKR11-kA(t)2+kt sin(log(A(t)+1))
CKR12-kA(t)2+km sin(√A(t))
CKR13-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR14-kA(t)+kp sin(ωA(t))
CKR15-k√A(t)-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR16-k√A(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR17-kA(t)+kfA(t)0.33
CKR18-kA(t) exp(-ks t)+kfA(t)0.33
CKR19-kA(t)2+kp sin(ωA(t))
CKR20-kA(t)2-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR21-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR22-kA(t) exp(-ks t)+kqA(t) log(γt+1)
CKR23-kA(t)2-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR24-k√A(t)+kp sin(ωA(t))
CKR25-k√A(t)-kA(t)2+kfA(t)0.33
CKR26-kA(t)+kt sin(log(A(t)+1))
CKR27-kA(t)2-kA(t) exp(-ks t)+km sin(√A(t))
CKR28-kA(t)2-kA(t) exp(-ks t)+kfA(t)0.33
CKR29-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR30-kA(t)-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR31-kA(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR32-k√A(t)-kA(t)+kw cos(log(A(t)+1))
CKR33-kA(t)-kA(t) exp(-ks t)+kfA(t)0.33
CKR34-k√A(t)-kA(t)2+kt sin(log(A(t)+1))
CKR35-kA(t)2+kfA(t)0.33
CKR36-kA(t)+kqA(t)log(γt+1)
", + "bbox": [ + 96, + 104, + 877, + 878 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Continued on next page", + "bbox": [ + 707, + 883, + 867, + 898 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 475, + 922, + 496, + 934 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/39fd3cc32845900a08aae36a6154ecdf290fd268ff7603ccbdc4f582bcc25f59.jpg", + "table_caption": [ + "Table 4 - continued from previous page" + ], + "table_footnote": [], + "table_body": "
DomainEquation IDEquation
BiologyBPG1r(1-P(t)/K0)P(t)+rP(t)0.33
BPG2rP(t)exp(-γt)+rP(t)2/(αP(t)+1)
BPG3βP(t)sin(ωt)+rP(t)exp(-γt)
BPG4r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG5r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG6r(1-P(t)/K0)P(t)+rP(t)2/(αP(t)+1)
BPG7-QαP(t)+r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG8r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)0.33
BPG9r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG10r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG11rP(t)0.33+rP(t)
BPG12r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)exp(-γt)
BPG13βP(t)sin(ωt)+r(1-P(t)/K0)P(t)
BPG14r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG15r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG16rP(t)0.33+rP(t)exp(-γt)
BPG17r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG18r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33
BPG19βP(t)sin(ωt)+r(1-P(t)/K0)P(t)+rP(t)
BPG20r(1-P(t)/K0)P(t)+rP(t)/tα
BPG21r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG22r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)/tα
BPG23r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG24r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
PhysicsPO1F0sin(t)-βsin(v(t))-ω02x(t)3-ω02x(t)exp(-|x(t)|)
PO2F0sin(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO3-αv(t)3-μ(1-x(t)2)v(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO4F0sin(t)-βsin(v(t))-2βv(t)
PO5F0sin(t)-αv(t)3-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)
PO6-βsin(v(t))-2βv(t)-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)3-ω02x(t)
PO7-βlog(|v(t)|+1)-2βv(t)-ω02x(t)3
PO8-αv(t)3-β|v(t)|0.33-ω02x(t)3
", + "bbox": [ + 96, + 104, + 877, + 872 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Continued on next page", + "bbox": [ + 707, + 876, + 867, + 891 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/f5d825a3634b5f016ef48b7a8e3de61dc5544258d05e1729a47a488b41937f63.jpg", + "table_caption": [ + "Table 4 - continued from previous page" + ], + "table_footnote": [], + "table_body": "
DomainEquation IDEquation
PO9-β|v(t)|0.33 - ω02x(t)3
PO10F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)
PO11F0sin(t) - ω02(γt+1)x(t) - ω02x(t)3 - ω02x(t)
PO12-βsin(v(t)) - ω02(γt+1)x(t) - ω02x(t)3
PO13F0sin(t) - αv(t)3 - β|v(t)|0.33 - ω02(γt+1)x(t) - ω02x(t)
PO14F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO15F0sin(t) - βlog(|v(t)| + 1) - βsin(v(t)) - 2βv(t) - μ(1-x(t)2)v(t)
PO16F0sin(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t) - ω02x(t) exp(-|x(t)|)
PO17F0sin(t) - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO18F0sin(t) - βsin(x(t))v(t) - 2βv(t) - ω02x(t)
PO19-βsin(x(t))v(t) - ω02x(t)
PO20-2βv(t) - ω02x(t) exp(-|x(t)|)
PO21-αv(t)3 - β log(|v(t)| + 1) - 2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO22F0sin(t) - βsin(x(t))v(t)
PO23-2βv(t) - β exp(-|x(t)|)v(t) - μ(1-x(t)2)v(t) - ω02x(t)3
PO24F0sin(t) - βlog(|v(t)| + 1) - ω02x(t) exp(-|x(t)|)
PO25F0sin(t) - αv(t)3 - β log(|v(t)| + 1)
PO26F0sin(t) - βsin(v(t))
PO27F0sin(t) - βlog(|v(t)| + 1) - 2βv(t) - ω02x(t)3
PO28F0sin(t) - αv(t)3 - 2βv(t) - βexp(-|v(t)|)v(t)
PO29-2βv(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
PO30-μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)3
PO31-αv(t)3 - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO32-ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3
PO33F0sin(t) - αv(t)3 - βexp(-|v(t)|)v(t) - ω02x(t)3
PO34-2βv(t) - μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)
PO35-2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO36F0sin(t) - βsin(v(t)) - ω02(γ|v(t)|0.33 + 1)x(t)
PO37F0sin(t) - βexp(-|x(t)|)v(t)
PO38F0sin(t) - αv(t)3 - 2βv(t) - ω02(γt+1)x(t)
PO39-βsin(v(t)) - μ(1-x(t)2)v(t) - ω02x(t) exp(-|x(t)|)
PO40F0sin(t) - αv(t)3 - βexp(-|x(t)|)v(t) - μ(1-v(t)2)v(t)
PO41F0sin(t) - β|v(t)|0.33 - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
", + "bbox": [ + 96, + 104, + 877, + 878 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Continued on next page", + "bbox": [ + 707, + 883, + 867, + 897 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 477, + 922, + 496, + 934 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/4f27e9f084032a2c13ab16f8039019f7fcbfd78a412426da76d8a73012a4b88b.jpg", + "table_caption": [ + "Table 4 - continued from previous page" + ], + "table_footnote": [], + "table_body": "
DomainEquation IDEquation
PO42-μ(1-x(t)2)v(t)-ω02x(t)exp(-|x(t)|)
PO43F0sin(t)-αv(t)3-βsin(x(t))v(t)-2βv(t)
PO44F0sin(t)-βsin(x(t))v(t)-2βv(t)-μ(1-x(t)2)v(t)-ω02x(t) exp(-|x(t)|)
MaterialMatSci1E0ε(-αT(T-T0)+1)-β(T-T0)+εMη(T-T0)
MatSci2Hε3+KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci3Hε3+η(T-T0)exp(-ε)
MatSci4Hε3+KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci5E0ε2+η(T-T0)log(ε+1)
MatSci6E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εMη(T-T0)
MatSci7E0ε(-αT(T-T0)+1)+εη(T-T0)2
MatSci8Hε3-β(T-T0)+η(T-T0)log(ε+1)
MatSci9E0ε(-αT(T-T0)+1)+εMη(T-T0)
MatSci10Hε3-β(T-T0)+ε3η(T-T0)
MatSci11Hε3+KεNexp(-Q/(RT))+εη(T-T0)2
MatSci12KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci13E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εηexp(-(T-T0)2)
MatSci14-β(T-T0)+εηexp(-(T-T0)2)
MatSci15-β(T-T0)+εMη(T-T0)
MatSci16E0ε(-αT(T-T0)+1)+εηexp(-(T-T0)2)
MatSci17E0ε2+εη(T-T0)2
MatSci18E0ε(-αT(T-T0)+1)-β(T-T0)+η(T-T0)log(ε+1)
MatSci19Hε3+η(T-T0)sin(ε)
MatSci20E0ε2-β(T-T0)+ε3η(T-T0)
MatSci21E0ε2+εηsin(T-T0)
MatSci22KεNexp(-Q/(RT))-β(T-T0)+η(T-T0)log(ε+1)
MatSci23E0ε(-αT(T-T0)+1)+Hε3+η(T-T0)sin(ε)
MatSci24KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci25E0ε2+E0ε(-αT(T-T0)+1)+η(T-T0)log(ε+1)
", + "bbox": [ + 96, + 104, + 877, + 765 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 26 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Ground Truth: $\\frac{dP}{dt} = 0.9540\\left(1 - \\frac{P}{96.9069}\\right)P + 0.9540P^{\\frac{1}{3}}$", + "a) Predicted: Direct (Llama-3.1-8b)" + ], + "code_body": "def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray:\n ''' Mathematical function for Population growth rate\nArgs:\n t: A numpy array representing observations of Time.\n P: A numpy array representing observations of Population at time t.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs.\n''' output = (\n params[0] * P # A linear term based on population size\n + params[1] * P ** 2 # A squared term for birth or death ratio\n + params[2] * t + params[3] * P * t # Linear and linear cross term\n + params[4]\n)\nreturn output", + "guess_lang": "python", + "bbox": [ + 192, + 224, + 782, + 426 + ], + "page_idx": 27 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "b) Predicted: SGA (Llama-3.1-8b)" + ], + "code_body": "class SymbolicEquation(nnModule):\n def __init__(self, r: float = 0.5, K: float = 100.0, time_scale: float = 0.05):\n Define trainable continuous parameters for differentiable optimization.\n Tentatively initialize the parameters with the default values in args.\n Params:\n r (float): growth rate.\n K (float): carrying capacity.\n time_scale (float): time-dependent coefficient.\n super().__init()\n self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) # (1,) \n self.K = 
nn_PARAMETER(torch.tensor(K, dtype=torch.float)) # (1,) \n self.time_scale = nn_PARAMETER(torch.tensor(time_scale, dtype=torch.float))\n # # (1,) \n # self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) \ndef forward(self, t: torch.Tensor, P: torch.Tensor) -> torch.Tensor:\n \"\"Mathematical function for Population growth rate\n Params:\n t: Time.\n P: Population at time t.\n Return:\n dP_dt: Population growth rate.\n \"\"\n # # (B,) \n dP_dt = self.r * torch.exp(-self.time_scale*t) * P * (1 - P / self.K) # (B,) \nreturn dP_dt", + "guess_lang": "python", + "bbox": [ + 187, + 484, + 787, + 847 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "c) Predicted: LaSR (Llama-3.1-8b)", + "bbox": [ + 197, + 261, + 415, + 273 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d P}{d t} = \\left(- 9 1 0. 3 1 - \\left(\\left(P \\cdot (P + \\sqrt {P})\\right) + \\log \\left((t + 4. 6 3 9 6) - (- 0. 6 6 0 9 5)\\right)\\right) \\cdot \\sin \\left(\\frac {- 0 . 9 5 5 7 2}{e ^ {t} - t}\\right)\\right) + 9 1 0. 
3\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 287, + 777, + 315 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "d) Predicted: LLM-SR (Llama-3.1-8b)", + "bbox": [ + 196, + 358, + 429, + 371 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray:", + "bbox": [ + 184, + 387, + 699, + 404 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Mathematical function for Population growth rate with a logistic growth model, decay, and periodicity.", + "bbox": [ + 184, + 407, + 771, + 429 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Args:", + "bbox": [ + 205, + 440, + 241, + 448 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "t: A numpy array representing observations of Time.", + "bbox": [ + 233, + 450, + 571, + 459 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "P: A numpy array representing observations of Population at time t.", + "bbox": [ + 233, + 460, + 678, + 470 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "params: Array of numeric constants or parameters to be optimized (k, M, r, a, b, A, w, tau)", + "bbox": [ + 186, + 470, + 785, + 489 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Returns:", + "bbox": [ + 205, + 501, + 259, + 508 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs.", + "bbox": [ + 184, + 511, + 759, + 531 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "1 1", + "bbox": [ + 205, + 531, + 228, + 537 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Apply transformations to inputs (if needed)", + "bbox": [ + 205, + 541, + 504, + 551 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "$t = t / np.\\max(t)$ #Normalize time", + "bbox": [ + 205, + 551, + 441, + 561 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "P = P 
/ np.max(P) #Normalize population", + "bbox": [ + 205, + 561, + 480, + 571 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Compute the rates using logistic growth model", + "bbox": [ + 205, + 583, + 519, + 592 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "$\\mathrm{dP\\_dt} = \\mathrm{params}[0] * \\mathrm{P} * (1 - \\mathrm{P}) - \\mathrm{params}[1] * \\mathrm{P} - \\mathrm{params}[2] * \\mathrm{P}**\\mathrm{params}[3]$", + "bbox": [ + 205, + 593, + 699, + 603 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Compute the periodicity factor", + "bbox": [ + 205, + 614, + 421, + 623 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "periodic_factor = params[4] * np.sin(param[5] * (t - params[6]) + params[7])", + "bbox": [ + 205, + 625, + 717, + 633 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Combine growth rate and periodicity", + "bbox": [ + 205, + 645, + 455, + 654 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "$\\mathrm{dP\\_dt\\_with\\_periodicity} = \\mathrm{dP\\_dt} * \\mathrm{periodic\\_factor}$", + "bbox": [ + 205, + 655, + 527, + 665 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "return dP_dt_with_periodicity", + "bbox": [ + 205, + 676, + 401, + 686 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Figure 14. Example of output hypotheses from different LLM scientific equation discovery methods for BPG0 problem in LSR-Synth biology domain.", + "bbox": [ + 83, + 712, + 883, + 739 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Ground Truth:", + "bbox": [ + 253, + 135, + 346, + 147 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n0. 
1 9 \\cdot A (t) + 0. 1 9 \\cdot \\cos (\\log (A (t) + 1))\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 155, + 609, + 171 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "a) Predicted: Direct (Llama-3.1-8b)", + "text_level": 1, + "bbox": [ + 204, + 198, + 426, + 212 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Rate of change of concentration (Growth, Reaction or Decay)\n Args:\n t: A numpy array representing observations of Time.\n A: A numpy array representing observations of Concentration at time t.\n params: Array of numeric constants or parameters to be optimized.\n - params[0]: k (Rate constant, e.g., 0.1 min^-1)\n - params[1]: C0 (initial concentration, e.g., 10) for decay or growth\n models\n Return:\n A numpy array representing Rate of change of concentration as the result of applying the mathematical function to the inputs.\n - equation implemented under assumption that k*A represents linear decay, or linear growth.\n output = params[0] * A + params[1] * params[2]\n return output", + "guess_lang": "python", + "bbox": [ + 200, + 223, + 750, + 422 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "b) Predicted: SGA (Llama-3.1-8b)", + "text_level": 1, + "bbox": [ + 207, + 474, + 419, + 488 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "class SymbolicEquation(nnModule): def init(self,k:float $= 9.00\\mathrm{e} - 02$ A_0:float $= 9.60\\mathrm{e} - 01$ A_t:float $= 9.60\\mathrm{e} - 01$ .. Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. Args: k(float):time-dependent reaction rate constant. A_0(float):initial concentration. A_t(float):time-dependent change in concentration. 
super().__init_() #(1,) self.k $\\equiv$ nn.Parameters(torch.tensor(k,dtype $\\coloneqq$ torch.float)) self.A_0 $\\equiv$ nn.Parameterrtorch.tensor(A_0,dtype $\\coloneqq$ torch.float) # (1,) self.A_t $\\equiv$ nn.Parameterrtorch.tensor(A_t,dtype $\\coloneqq$ torch.float) # (1,) def forward(self,t:torch.Tensor,A:torch.Tensor) -> torch.Tensor: \"\"Mathematical function for Rate of change of concentration in chemistry reaction kinetics Args: t:Time.#(B,1) A:Concentration at time t.#(B,1) Return: dA_dt:Rate of change of concentration in chemistry reaction kinetics. (# (B,1)", + "guess_lang": "txt", + "bbox": [ + 212, + 500, + 759, + 839 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "c) Predicted: LaSR (Llama-3.1-8b)", + "bbox": [ + 279, + 329, + 496, + 344 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\left(- 0. 0 0 4 0 4 0 2 / \\left(1. 3 2 2 2 ^ {t}\\right)\\right) * \\left(\\left(A ^ {t}\\right) ^ {\\cos \\left(\\left(\\left(t + \\left(\\left(A / (A / A)\\right) - 0. 0 4 3 4 7 7\\right)\\right) ^ {A}\\right) - A\\right) + \\left(\\left(0. 7 5 6 2 9 ^ {1. 2 0 2 8} t\\right)\\right)\\right)} \\left. 
\\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 354, + 715, + 375 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "d) Predicted: LLM-SR (Llama-3.1-8b)", + "bbox": [ + 194, + 417, + 429, + 431 + ], + "page_idx": 30 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Rate of change of concentration in chemistry reaction kinetics\n Args:\n t: A numpy array representing observations of Time.\n A: A numpy array representing observations of Concentration at time t.\n params: Array of numeric constants or parameters to be optimized\n Return:\n A numpy array representing Rate of change of concentration in chemistry reaction kinetics as the result of applying the mathematical function to the inputs.\n output = params[0] * A + params[1] * A**2 + params[2] * A**3 + params[3] * t + params[4]\n return output", + "guess_lang": "python", + "bbox": [ + 197, + 454, + 763, + 613 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Figure 15. Example of output hypotheses from different LLM scientific equation discovery methods for CKR2 problem in LSR-Synth chemistry domain.", + "bbox": [ + 84, + 637, + 885, + 665 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Ground Truth:", + "text_level": 1, + "bbox": [ + 251, + 99, + 346, + 112 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n3 1. 1 2 \\cdot \\varepsilon \\cdot (- 0. 0 5 \\cdot (T - 3 1 6. 1 9) + 1) - 0. 2 3 \\cdot (T - 3 1 6. 1 9) + \\varepsilon^ {0. 6 0} \\cdot 3. 6 3 \\cdot (T - 3 1 6. 
1 9)\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 114, + 725, + 131 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "a) Predicted: Direct (Llama-3.1-8b)" + ], + "code_body": "def equation(eppsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Stress\nArgs:\n epsilon: A numpy array representing observations of Strain.\n T: A numpy array representing observations of Temperature.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Stress as the result of applying the mathematical function to the inputs.\n \"\"\n reference_temperature = 0\n sigma_0 = params[0]\n alpha = params[1]\n n = params[2]\n linear_epsilon_term = params[3] * epsilon\n multiplicative_epsilon_term = params[4] * epsilon\n linear_temperature_term = params[5] * T\n output = sigma_0 * (1 + alpha * (T - reference_temperature)) * epsilon**\n linear_epsilon_term + multiplicative_epsilon_term + linear_temperature_term\n return output", + "guess_lang": "python", + "bbox": [ + 200, + 188, + 738, + 406 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "b) Predicted: SGA (Llama-3.1-8b)" + ], + "code_body": "class SymbolicEquation(nnModule): def__init__(self,c_0:float $= 9.83\\mathrm{e} - 01$ ,c_1:float $= 9.86\\mathrm{e} - 01$ ,c_2:float $= 9.83\\mathrm{e} - 01$ a:float $= -3.73\\mathrm{e} - 03$ ,b:float $= -1.11\\mathrm{e} - 02$ ,d:float $= 2.63\\mathrm{e} - 02$ : Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. \nArgs: c_0(float): coefficient. c_1(float): coefficient. c_2(float): coefficient. a(float): coefficient for quadratic term. b(float): coefficient for exponential term. d(float): coefficient for non-linear relationship between Strain and Temperature. 
super()._init_(self.c_0=nn.Parameters(torch.tensor(c_0,dtype $\\equiv$ torch.float)) # (1,) self.c_1 = nn.Parameter(torch.tensor(c_1,dtype $\\equiv$ torch.float)) # (1,) self.c_2 = nn.Parameter(torch.tensor(c_2,dtype $\\equiv$ torch.float)) # (1,) self.a = nn.Parameter(torch.tensor(a,dtype $\\equiv$ torch.float)) # (1,) self.b = nn.Parameter(torch.tensor(b,dtype $\\equiv$ torch.float)) # (1,) self.d = nn.Parameter(torch.tensor(d,dtype $\\equiv$ torch.float)) # (1,) \ndef forward(self,epsilon:torch.Tensor,T:torch.Tensor) -> torch.Tensor:\"\"\"Mathematical function for Stress \nArgs: epsilon:Strain.#(B,1) T:Temperature.#(B,1) \nReturn: sigma:Stress.#(B,1) \"\"\" # (B,1) sigma $=$ self.c_0 $^+$ self.c_1 \\* torch.exp(self.a \\* epsilon $^+$ self.b \\*T)+ self.c_2 \\* torch.cos(self.d \\* epsilon \\*T) return sigma", + "guess_lang": "python", + "bbox": [ + 212, + 483, + 733, + 877 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "c) Predicted: LaSR (Llama-3.1-8b)", + "bbox": [ + 281, + 324, + 500, + 338 + ], + "page_idx": 32 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\left(\\sqrt {\\epsilon} \\cdot (- 7 1 2. 3 6) + \\left(\\left(\\frac {\\sqrt {\\epsilon}}{1 . 3 7 9 2 ^ {\\epsilon}} \\cdot 2. 2 7 9 8\\right) \\cdot (T - \\epsilon) + \\epsilon\\right) + 6. 8 1 2 5\\right) \\cdot 1. 
5 0 7 6 ^ {\\sqrt {\\epsilon}}\\right) - \\sin (\\log (T))\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 345, + 728, + 373 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "d) Predicted: LLM-SR (Llama-3.1-8b)" + ], + "code_body": "def equation(epsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Stress\nArgs:\n epsilon: A numpy array representing observations of Strain.\n T: A numpy array representing observations of Temperature.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Stress as the result of applying the mathematical function to the inputs.\n>>> output = params[0] * (epsilon ** (1/params[4]) - 1) + \\\n params[1] * (epsilon ** (1/params[6]) - 1) + \\\n params[2] * (epsilon ** (1/params[8]) - 1) * np.exp.params[3] * epsilon) + \\\n params[5] * epsilon + params[6] * T + params[7]\nreturn output", + "guess_lang": "python", + "bbox": [ + 200, + 441, + 766, + 612 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Figure 16. 
Example of output hypotheses from different LLM scientific equation discovery methods for MatSci0 problem in LSR-Synth material science domain.", + "bbox": [ + 84, + 645, + 883, + 672 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Ground Truth:", + "bbox": [ + 253, + 117, + 346, + 130 + ], + "page_idx": 33 + }, + { + "type": "equation", + "text": "\n$$\nF _ {0} \\cdot \\sin (t) - \\beta \\cdot \\sin (v (t)) - \\omega_ {0} ^ {2} \\cdot x (t) ^ {3} - \\omega_ {0} ^ {2} \\cdot x (t) \\cdot e ^ {- | x (t) |}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 133, + 689, + 154 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "a) Predicted: Direct (Llama-3.1-8b)", + "text_level": 1, + "bbox": [ + 204, + 181, + 426, + 195 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray:", + "bbox": [ + 200, + 205, + 720, + 226 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "\"\"Mathematical function for Acceleration", + "bbox": [ + 220, + 226, + 495, + 234 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Args:", + "bbox": [ + 220, + 246, + 254, + 255 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "x: A numpy array representing observations of Position at time t.", + "bbox": [ + 245, + 256, + 666, + 265 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "t: A numpy array representing observations of Time.", + "bbox": [ + 246, + 266, + 575, + 275 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "v: A numpy array representing observations of Velocity at time t.", + "bbox": [ + 246, + 276, + 666, + 285 + ], + 
"page_idx": 33 + }, + { + "type": "text", + "text": "params: Array of numeric constants or parameters to be optimized", + "bbox": [ + 246, + 286, + 661, + 295 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Return:", + "bbox": [ + 220, + 306, + 267, + 314 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "A numpy array representing Acceleration as the result of applying the mathematical function to the inputs.", + "bbox": [ + 200, + 315, + 694, + 334 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "1", + "bbox": [ + 220, + 335, + 243, + 340 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Since acceleration is Force/mass and Force is mass*velocity change,", + "bbox": [ + 220, + 344, + 666, + 354 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "we represent this equation as", + "bbox": [ + 220, + 354, + 423, + 364 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "a = dv/dt or in our representation format, given our position x it will correspond", + "bbox": [ + 220, + 366, + 766, + 383 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions:", + "bbox": [ + 200, + 383, + 759, + 404 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "output = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + params[4]) + params[5]*v**2 + params[6]*t + params[7]*(x**2 + v)", + "bbox": [ + 200, + 405, + 702, + 424 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "return output", + "bbox": [ + 220, + 425, + 308, + 434 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "b) Predicted: SGA (Llama-3.1-8b)", + "text_level": 1, + "bbox": [ + 207, + 463, + 421, + 477 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "class SymbolicEquation(nnModule): def init(self,c_0: float $= 9.50\\mathrm{e} - 01$ ,c_1: float $= 
9.50\\mathrm{e} - 01$ ,c_2: float $= 9.50\\mathrm{e} - 01$ ,c_3: float $= 1.05\\mathrm{e} + 00$ :", + "guess_lang": "txt", + "bbox": [ + 212, + 488, + 733, + 535 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args.", + "bbox": [ + 256, + 539, + 717, + 559 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Args:", + "bbox": [ + 259, + 569, + 292, + 577 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "c_0(float): coefficient.", + "bbox": [ + 285, + 578, + 447, + 587 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "c 1(float): coefficient.", + "bbox": [ + 285, + 588, + 444, + 595 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "c_2(float); coefficient.", + "bbox": [ + 285, + 597, + 447, + 604 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "c 3 (float): coefficient.", + "bbox": [ + 285, + 607, + 447, + 616 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "1 1", + "bbox": [ + 259, + 618, + 282, + 625 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "super().__init_(self.c 0 = nn.Parameter(torch.tensor(c 0, dtype=torch.float)) # (1,)", + "bbox": [ + 259, + 627, + 704, + 646 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "self.c1 = nn_PARAMETER(torch.tensor(c1, dtype=torch.float)) # (1,)", + "bbox": [ + 259, + 647, + 702, + 655 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "self.c_2 = nn_PARAMETER(torch:tensor(c_2, dtype=torch.float)) # (1,)", + "bbox": [ + 259, + 657, + 702, + 666 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "self.c_3 = nn_PARAMETER(torch.tensor(c_3, dtype=torch.float)) # (1,)", + "bbox": [ + 259, + 667, + 702, + 676 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "def forward(self, x: torch.Tensor, t: torch.Tensor, v: torch.Tensor) -> torch.Tensor:", + "bbox": [ + 233, + 695, + 694, + 
715 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "\"\"Mathematical function for Acceleration in Nonl-linear Harmonic Oscillator", + "bbox": [ + 259, + 715, + 751, + 724 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Args:", + "bbox": [ + 259, + 736, + 292, + 744 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "x: Position at time t. # (B,)", + "bbox": [ + 285, + 746, + 478, + 753 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "t: Time. # (B,)", + "bbox": [ + 285, + 756, + 388, + 763 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "v: Velocity at time t. # (B,)", + "bbox": [ + 285, + 766, + 478, + 775 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Return:", + "bbox": [ + 259, + 786, + 305, + 794 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "dv dt: Acceleration in Nonl-linear Harmonic Oscillator. # (B,)", + "bbox": [ + 285, + 795, + 691, + 804 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "1 1", + "bbox": [ + 259, + 805, + 281, + 811 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Non-linear relationship between x, t, and v", + "bbox": [ + 259, + 814, + 552, + 824 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "dv_dt = self.c_0 * torch.exp(-self.c_1 * x) + self.c_2 * torch.cos(self.c_3 * t + self.c_3 * x) # (B, )", + "bbox": [ + 200, + 825, + 756, + 844 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "return dv_dt", + "bbox": [ + 259, + 845, + 339, + 854 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 475, + 922, + 496, + 934 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "c) Predicted: LaSR (Llama-3.1-8b)", + "bbox": [ + 279, + 287, + 496, + 301 + ], + "page_idx": 34 + }, + { + "type": 
"equation", + "text": "\n$$\n\\left(\\frac {\\left((x + x) \\cdot \\left(\\left(\\frac {\\sqrt {x ^ {\\mathrm {l i n e s}} - \\frac {x}{\\sin x}}}{x}\\right) \\cdot \\sin (1 . 1 4 7 8 \\cdot t) - x\\right) \\cdot \\sin (x)\\right)}{1 . 7 0 5 2} - \\sin (0. 0 0 3 2 8 2 7)\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 311, + 609, + 342 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "d) Predicted: LLM-SR (Llama-3.1-8b)", + "bbox": [ + 194, + 380, + 429, + 393 + ], + "page_idx": 34 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Acceleration", + "guess_lang": "python", + "bbox": [ + 200, + 412, + 720, + 441 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Args:", + "bbox": [ + 220, + 452, + 254, + 460 + ], + "page_idx": 34 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "x: A numpy array representing observations of Position at time t. \nt: A numpy array representing observations of Time. \nv: A numpy array representing observations of Velocity at time t. \nparams: Array of numeric constants or parameters to be optimized", + "guess_lang": "txt", + "bbox": [ + 245, + 463, + 666, + 502 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Return:", + "bbox": [ + 220, + 511, + 266, + 518 + ], + "page_idx": 34 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "A numpy array representing Acceleration as the result of applying the mathematical function to the inputs. 
\n```python\n```\n# Since acceleration is Force/mass and Force is mass*velocity change,\n# we represent this equation as\n# a = dv/dt or in our representation format, given our position x it will correspond to\n# a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions:\noutput = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + params[4])\n+ params[5]*v**2 + params[6]*t + params[7]*(x**2 + v)\nreturn output", + "guess_lang": "python", + "bbox": [ + 200, + 521, + 777, + 640 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Figure 17. Example of output hypotheses from different LLM scientific equation discovery methods for PO0 problem in LSR-Synth physics domain.", + "bbox": [ + 84, + 679, + 885, + 708 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models", + "bbox": [ + 171, + 56, + 799, + 70 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 34 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_model.json b/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8bdc7a3c2b6614fb9bfa856e5d50ba99f4f7bba4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_model.json @@ -0,0 +1,5528 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.112, + 0.11, + 0.861, + 0.157 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.199, + 0.788, + 0.237 + ], + "angle": 0, + "content": "Parshin Shojaee* 1 Ngoc-Hieu Nguyen* 2 Kazem Meidani 34 Amir Barati Farimani 3 Khoa D Doan 2 Chandan K Reddy 1" + }, + { + "type": "text", 
+ "bbox": [ + 0.178, + 0.24, + 0.794, + 0.255 + ], + "angle": 0, + "content": "Website: https://github.com/deep-symbolic-mathematics/llm-srbench" + }, + { + "type": "title", + "bbox": [ + 0.242, + 0.282, + 0.321, + 0.297 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.304, + 0.445, + 0.789 + ], + "angle": 0, + "content": "Scientific equation discovery has long been a cornerstone of scientific progress, enabling the derivation of laws governing natural phenomena. Recently, Large Language Models (LLMs) have gained interest for this task due to their potential to leverage embedded scientific knowledge for hypothesis generation. However, it is difficult to assess the true discovery capabilities of these methods because existing benchmarks often use well-known equations. This makes them vulnerable to memorization by LLMs and results in inflated performance metrics that do not reflect genuine discovery. In this paper, we introduce LLM-SRBench, a comprehensive benchmark with 239 challenging problems across four scientific domains specifically designed to evaluate LLM-based scientific equation discovery methods while preventing trivial memorization. Our benchmark comprises two main categories: LSR-Transform, which transforms common physical models into less common mathematical representations to test reasoning beyond memorized forms, and LSR-Synth, which introduces synthetic, discovery-driven problems requiring data-driven reasoning. Through extensive evaluation of several state-of-the-art methods, using both open and closed LLMs, we find that the best-performing system so far achieves only \\(31.5\\%\\) symbolic accuracy. These findings highlight the challenges of scientific equation discovery, positioning LLM-SRBench as a valuable resource for future research." 
+ }, + { + "type": "image", + "bbox": [ + 0.514, + 0.284, + 0.871, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.455, + 0.888, + 0.527 + ], + "angle": 0, + "content": "Figure 1. Error analysis comparing simple LLM sampling (Llama-3.1-8B) on 100 Feynman problems versus LLM-SRBench datasets (LSR-Transform and LSR-Synth). The sharp drops in numeric error curves and considerably lower symbolic error for Feynman problems suggest memorization rather than gradual discovery." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.562, + 0.63, + 0.577 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.582, + 0.888, + 0.763 + ], + "angle": 0, + "content": "Equation discovery, the process of uncovering symbolic mathematical expressions from observational data, has been a cornerstone of scientific advancement. This task, also known as symbolic regression (SR), goes beyond mere data-driven predictive modeling by seeking interpretable mathematical relations that reveal the underlying mechanisms of natural phenomena. When scientists derive mathematical equations from empirical data, they gain more than just predictive power – they obtain insights into fundamental physical principles, enable extrapolation beyond observed data, and facilitate knowledge transfer across scientific domains (Langley, 1981; Schmidt & Lipson, 2009)." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.77, + 0.889, + 0.906 + ], + "angle": 0, + "content": "Standard approaches to equation discovery have primarily relied on genetic programming (GP) and evolutionary algorithms (Cranmer, 2023; La Cava et al., 2021), which represent mathematical expressions as trees and navigate the vast space of possible equations through evolutionary search techniques. However, these methods face two fundamental challenges. 
First, the NP-hard nature of equation discovery (Virgolin & Pissis, 2022) makes their random mutation and crossover operations computationally prohibitive across" + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.815, + 0.475, + 0.856 + ], + "angle": 0, + "content": "*Equal contribution ¹Virginia Tech ²VinUniversity ³Carnegie Mellon University ⁴Capital One. Correspondence to: Parshin Shojaee ." + }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.866, + 0.475, + 0.906 + ], + "angle": 0, + "content": "Proceedings of the \\(42^{nd}\\) International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s)." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.286, + 0.058, + 0.711 + ], + "angle": 270, + "content": "arXiv:2504.10415v2 [cs.CL] 7 Jun 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "image", + "bbox": [ + 0.148, + 0.085, + 0.166, + 0.102 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.088, + 0.27, + 0.098 + ], + "angle": 0, + "content": "Goal / Instruction" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.104, + 0.328, + 0.131 + ], + "angle": 0, + "content": "- Discover the mathematical equation/law that describes [output variable] based on given [input features]." + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.131, + 0.332, + 0.166 + ], + "angle": 0, + "content": "- Use domain-specific knowledge of [the scientific field] and provided data samples to find an equation that is scientifically valid and fits the data well." 
+ }, + { + "type": "image", + "bbox": [ + 0.144, + 0.175, + 0.166, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.18, + 0.272, + 0.189 + ], + "angle": 0, + "content": "Scientific Context" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.198, + 0.248, + 0.206 + ], + "angle": 0, + "content": "Problem description" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.207, + 0.301, + 0.215 + ], + "angle": 0, + "content": "Variable names and descriptions" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.215, + 0.202, + 0.224 + ], + "angle": 0, + "content": "Example:" + }, + { + "type": "list", + "bbox": [ + 0.143, + 0.207, + 0.301, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.227, + 0.332, + 0.264 + ], + "angle": 0, + "content": "Find an equation in the field of classical mechanics that describes the mass \\((m)\\) needed to store energy in an oscillating system, given physical input variables: mean stored energy \\((E_{m})\\), driving frequency \\((\\omega)\\), natural frequency \\((\\omega_{n})\\) and amplitude \\((x)\\)." + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.276, + 0.166, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.184, + 0.279, + 0.212, + 0.288 + ], + "angle": 0, + "content": "Data" + }, + { + "type": "table", + "bbox": [ + 0.142, + 0.296, + 0.337, + 0.351 + ], + "angle": 0, + "content": "
\\( E_{\\mathrm {n}} \\)ω\\( \\omega_0 \\)xm
4.71.22.31.51.2
3.42.72.73.10.1
i
2.81.53.61.40.4
" + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.084, + 0.394, + 0.104 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.4, + 0.088, + 0.491, + 0.098 + ], + "angle": 0, + "content": "Typical Workflow" + }, + { + "type": "image", + "bbox": [ + 0.412, + 0.113, + 0.434, + 0.129 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.442, + 0.113, + 0.539, + 0.133 + ], + "angle": 0, + "content": "LLM internal scientific knowledge" + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.135, + 0.434, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.438, + 0.138, + 0.543, + 0.148 + ], + "angle": 0, + "content": "Reasoning and planning" + }, + { + "type": "image", + "bbox": [ + 0.413, + 0.156, + 0.435, + 0.169 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.451, + 0.158, + 0.51, + 0.168 + ], + "angle": 0, + "content": "Programming" + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.181, + 0.586, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.415, + 0.28, + 0.438, + 0.3 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.442, + 0.285, + 0.545, + 0.294 + ], + "angle": 0, + "content": "Parameter Optimization" + }, + { + "type": "image", + "bbox": [ + 0.413, + 0.305, + 0.436, + 0.321 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.439, + 0.309, + 0.486, + 0.318 + ], + "angle": 0, + "content": "Simulation" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.304, + 0.511, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.308, + 0.56, + 0.317 + ], + "angle": 0, + "content": "Experiments" + }, + { + "type": "image", + "bbox": [ + 0.421, + 0.325, + 0.443, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.447, + 0.331, + 0.537, + 0.339 + ], + "angle": 0, + "content": 
"Statistical Fit to Data" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.085, + 0.66, + 0.103 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.673, + 0.088, + 0.735, + 0.098 + ], + "angle": 0, + "content": "Hypothesis" + }, + { + "type": "text", + "bbox": [ + 0.623, + 0.108, + 0.834, + 0.125 + ], + "angle": 0, + "content": "- Discovered mathematical equation represented by expressions, trees, programs, etc." + }, + { + "type": "text", + "bbox": [ + 0.624, + 0.134, + 0.785, + 0.143 + ], + "angle": 0, + "content": "- Supporting explanations / reasoning" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.154, + 0.718, + 0.161 + ], + "angle": 0, + "content": "\\(m = 4^{*}E n / (x^{**}2^{*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.162, + 0.747, + 0.17 + ], + "angle": 0, + "content": "(omega\\*\\*2 + omega_0\\*\\*2))" + }, + { + "type": "image", + "bbox": [ + 0.756, + 0.144, + 0.835, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.627, + 0.186, + 0.805, + 0.205 + ], + "angle": 0, + "content": "def equation(E_n, omega, omega_0, x, params): # Energy-mass ratio normalized by parameter numerator \\(=\\) params[0] \\(\\ast\\) E n" + }, + { + "type": "text", + "bbox": [ + 0.642, + 0.206, + 0.832, + 0.232 + ], + "angle": 0, + "content": "Combined frequency and amplitude scaling effects denominator \\(=\\) omega++2 \\(\\ast\\) x++2 + omega_0**2 \\(\\ast\\) x**2 m \\(=\\) numerator / denominator return m" + }, + { + "type": "title", + "bbox": [ + 0.665, + 0.25, + 0.722, + 0.259 + ], + "angle": 0, + "content": "Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.641, + 0.268, + 0.72, + 0.276 + ], + "angle": 0, + "content": "Data Fidelity:" + }, + { + "type": "text", + "bbox": [ + 0.681, + 0.277, + 0.769, + 0.285 + ], + "angle": 0, + "content": "In-Domain accuracy" + }, + { + "type": "text", + "bbox": [ + 0.664, + 0.286, + 0.808, + 0.294 + ], + "angle": 0, + "content": "- Out-of-Domain 
generalization" + }, + { + "type": "list", + "bbox": [ + 0.641, + 0.268, + 0.808, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.641, + 0.3, + 0.75, + 0.309 + ], + "angle": 0, + "content": "- Symbolic Accuracy:" + }, + { + "type": "text", + "bbox": [ + 0.664, + 0.31, + 0.812, + 0.317 + ], + "angle": 0, + "content": "Human expert/LLM evaluator" + }, + { + "type": "text", + "bbox": [ + 0.664, + 0.317, + 0.769, + 0.325 + ], + "angle": 0, + "content": "Scientific plausibility" + }, + { + "type": "text", + "bbox": [ + 0.664, + 0.326, + 0.744, + 0.335 + ], + "angle": 0, + "content": "Interpretability" + }, + { + "type": "list", + "bbox": [ + 0.664, + 0.31, + 0.812, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.641, + 0.341, + 0.774, + 0.35 + ], + "angle": 0, + "content": "Computational Efficiency" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.374, + 0.887, + 0.43 + ], + "angle": 0, + "content": "Figure 2. Overview of the LLM-based Scientific Equation Discovery. The benchmark tasks (left) combine scientific context with numerical data. The discovery process (middle) iteratively leverages LLM's scientific knowledge and data-driven reasoning to generate hypotheses for underlying equations. Discovered hypotheses, represented as equation strings, trees, or programs, are then evaluated (right) using multiple metrics including data fidelity, symbolic accuracy, and computational efficiency." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.455, + 0.477, + 0.561 + ], + "angle": 0, + "content": "vast search spaces. Second, unlike human scientists who leverage their domain knowledge and expertise to guide hypothesis formation, these approaches are mostly purely data-driven, and isolated from existing scientific knowledge. These limitations have motivated researchers to develop methods that incorporate scientific domain knowledge into the equation discovery process." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.569, + 0.477, + 0.78 + ], + "angle": 0, + "content": "Large Language Models (LLMs) have recently emerged as a promising solution to these challenges, offering a new paradigm for scientific equation discovery. LLMs, trained on vast corpora of scientific literature, possess extensive embedded scientific knowledge. This has sparked significant interest in leveraging LLMs for scientific equation discovery, with several recent works demonstrating their potential (Shojae et al., 2024b; Ma et al., 2024; Grayeli et al., 2024; Merler et al., 2024; Du et al., 2024; Reddy & Shojaee, 2024; Zhang et al., 2024). These LLM-based approaches have shown to enhance the equation hypothesis generation process by incorporating scientific priors, guiding the exploration of equation search spaces more efficiently, and providing interpretable reasoning for the search process." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.787, + 0.476, + 0.893 + ], + "angle": 0, + "content": "Despite the promising potential of LLM-based equation discovery methods, their rigorous and robust evaluation still remains an open challenge. The current scientific equation discovery benchmarks are primarily represented by SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022). SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.455, + 0.888, + 0.591 + ], + "angle": 0, + "content": "& Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for data points. However, these benchmarks exhibit significant limitations for the evaluation of LLM-based methods. 
Their problems are mostly based on known physics equations from textbooks, which makes them often subject to memorization by LLMs." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.599, + 0.889, + 0.901 + ], + "angle": 0, + "content": "As noted by (Shojaaee et al., 2024b), LLMs frequently succeed on these common equation discovery benchmarks through simple recitation based on variable names and problem descriptions, rather than the actual process of data-driven discovery and reasoning. Our analysis (shown in Fig. 1) also confirms this finding - the sudden drop in the numeric error curve within the first few iterations and significantly lower symbolic error on Feynman problems indicate memorized solutions rather than a meaningful search towards discovery. To mitigate this issue, (Shojaaee et al., 2024b; Ma et al., 2024) have introduced a handful of five custom-crafted problems designed to prevent memorization by manually modifying known physical models. While these efforts represent a step forward, the small scale and limited diversity of these problem sets are insufficient to provide a comprehensive evaluation framework for emerging LLM-based methods in scientific equation discovery. A more robust and systematic benchmark is needed to enable standardized evaluation and foster the development of innovative methods in this emerging field." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.086, + 0.477, + 0.569 + ], + "angle": 0, + "content": "In this paper, we introduce LLM-SRBench, a new benchmark designed to rigorously evaluate the capabilities of LLM-based scientific equation discovery methods. 
LLM-SRBench addresses the limitations of existing benchmarks by constructing problem sets that avoid trivial recitation while leveraging the scientific priors embedded in LLMs, simulating conditions akin to scientific discovery. The benchmark is structured around two main categories of problems, each targeting distinct aspects of equation discovery. The first category focuses on transforming common scientific problems, such as those from the Feynman equations, into different mathematical representations of the same underlying physical problem. By symbolically altering input-output mappings and generating less common mathematical forms for the same problem, we challenge LLM-based equation discovery to go beyond memorization of the common forms. This approach is motivated by recent findings on the fragility of LLMs' reasoning capabilities to unfamiliar representations of otherwise familiar problems (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). The second category extends the approach introduced by (Shojae et al., 2024b), which combines known terms in the underlying equation with synthetic, novel terms to create problems that go beyond memorization and demand data-driven reasoning. We expand this idea into a comprehensive set of benchmark problems spanning diverse scientific domains. These problems incorporate carefully designed synthetic terms that are both novel and plausible. We further verify the solvability of the generated equations using numerical solvers, ensuring that the benchmark problems remain grounded in physical feasibility while presenting meaningful challenges for LLM-based discovery methods." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.576, + 0.478, + 0.817 + ], + "angle": 0, + "content": "LLM-SRBench comprises 111 problems in the first category (LSR-Transform), and 128 problems in the second category (LSR-Synth), spanning four scientific domains: chemistry (36), biology (24), physics (43), and material science (25). 
We comprehensively benchmark state-of-the-art LLM-based scientific equation discovery methods with several LLM backbones on these datasets. Our experiments reveal several key insights into the capabilities and limitations of current LLM-based scientific equation discovery methods. Results show that the best model can only solve \\(31.5\\%\\) of problems on LSR-Transform and \\(28.1\\%\\) on LSR-Synth. This underscores the challenging nature of the tasks in LLM-SRBench and highlights its potential as a critical evaluation foundation for future LLM-based scientific equation discovery methods. Overall, the contributions of this work are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.845, + 0.478, + 0.907 + ], + "angle": 0, + "content": "- We introduce LLM-SRBench, the first comprehensive benchmark with 239 challenging problems across various scientific domains, designed to evaluate LLM-based scientific equation discovery methods." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.887, + 0.162 + ], + "angle": 0, + "content": "- We propose a novel benchmark design through alternative mathematical representations (LSR-Transform) and synthetic, discovery-driven problems (LSR-Synth) to ensure rigorous evaluation of scientific reasoning and discovery capabilities beyond LLM memorization." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.168, + 0.887, + 0.214 + ], + "angle": 0, + "content": "- Extensive experiments on state-of-the-art methods reveal performance peaks at \\(31\\%\\), highlighting the benchmark's challenging nature and its potential for future research." + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.085, + 0.887, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.235, + 0.652, + 0.251 + ], + "angle": 0, + "content": "2. 
LLM-SRBench" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.261, + 0.888, + 0.442 + ], + "angle": 0, + "content": "We introduce LLM-SRBench, a novel benchmark designed to evaluate LLM-based methods for data-driven scientific equation discovery. As shown in Fig. 2, in this benchmark, a \"data-driven scientific equation discovery\" task is defined as follows: Given a task dataset \\(\\mathcal{D}\\), the corresponding scientific context \\(\\mathcal{C}\\), the objective is to derive a hypothesis \\(h\\) that represents the underlying mathematical relations behind the data with high precision and scientific plausibility. This process resembles the iterative search and refinement undertaken by human scientists, where LLMs act as optimizers, proposing and refining hypotheses based on both scientific knowledge and empirical data." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.453, + 0.644, + 0.467 + ], + "angle": 0, + "content": "2.1. LSR-Transform" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.476, + 0.889, + 0.643 + ], + "angle": 0, + "content": "This category is designed to evaluate whether LLM-based methods can discover equations in less common mathematical forms, avoiding reliance on memorization of well-known representations. This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). By transforming existing benchmark problems into different mathematical representations, we challenge LLMs' capabilities in data-driven scientific equation discovery and reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.649, + 0.889, + 0.906 + ], + "angle": 0, + "content": "We build on the Feynman (Udrescu & Tegmark, 2020) benchmark (current standard benchmark in scientific equation discovery), which consists of 100 physics equations, and systematically transform these equations into alternative mathematical forms (examples in App. A.1). As demonstrated in Fig. 3(a), the transformation process involves seven key steps: 1) Equation Collection: We gather the original mathematical expressions, along with their input and output variables, and scientific problem descriptions from the Feynman benchmark. 2) Select Pivot Variable: For each equation, we choose an input feature to become the new target variable. 3) Feature-Target Transformation: We transform the dataset by switching the roles of the selected input feature and the original target variable. 4) Symbolic Transformation: Using the SymPy library in Python on the parsed expressions, we solve each equation with respect to the selected input variable, treating it" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "image", + "bbox": [ + 0.127, + 0.085, + 0.48, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.358, + 0.362, + 0.37 + ], + "angle": 0, + "content": "(a) LSR-Transform" + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.084, + 0.844, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.632, + 0.357, + 0.712, + 0.37 + ], + "angle": 0, + "content": "(b) LSR-Synth" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.385, + 0.888, + 0.442 + ], + "angle": 0, + "content": "Figure 3. 
Data generation pipelines for the two dataset categories in LLM-SRBench. (a) LSR-Transform converts Feynman problems into alternative mathematical forms through symbolic transformation and input-output role switching, and (b) LSR-Synth generates novel discovery-driven problems by combining known scientific terms in the underlying models with synthetic novel terms. Both pipelines include validation steps to ensure solvability and scientific plausibility." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.46, + 0.477, + 0.779 + ], + "angle": 0, + "content": "as the new output and the original output variable as an input in the transformed equation. 5) Solvability Check: We retain only those transformations that are analytically solvable, ensuring the feasibility of the resulting equations. 6) Dataset Refinement: For the transformed equations with altered data domains (e.g., due to square roots or denominators), we filter the original Feynman dataset to ensure all data points fall within the valid domains of the new equations. 7) Problem Reformulation: Using LLM (GPT4o), we generate a new natural language specification for each transformed problem. During this data generation process, we constrain the transformed equations' complexity (measured by expression tree node count) to the range of original Feynman dataset distribution (full analysis in Fig. 8, App.A.1). This allows us to focus on the semantic aspects of discovery—specifically the interplay between reasoning and memorization of the mathematical forms—rather than conflating performance with the ability to handle syntactically complex and lengthy hypotheses. We also exclude transformed problems that LLM can solve through direct sampling without requiring access to data." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.784, + 0.476, + 0.906 + ], + "angle": 0, + "content": "This process yields 111 total transformed equations derived from the 100 original Feynman problems. 
Each transformed equation shares the same scientific context, problem description, and variables as its original counterpart but presents a less common mathematical form to be discovered. The goal of LSR-Transform is not to discover new equations but to evaluate whether LLM-based systems can validate discoveries from non-trivial, data-driven transformations of known" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.46, + 0.889, + 0.581 + ], + "angle": 0, + "content": "equations. To support scientific knowledge-guided discovery, each task in LSR-Transform is supplemented with a natural language description of the scientific problem and dataset, including variable names and their meanings. These descriptions are absent in the original Feynman benchmark but they are needed for LLM-based scientific equation discovery methods to provide scientific context in prompts for knowledge-guided equation discovery by LLMs." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.597, + 0.611, + 0.612 + ], + "angle": 0, + "content": "2.2. LSR-Synth" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.62, + 0.889, + 0.878 + ], + "angle": 0, + "content": "This category is designed to assess whether LLMs can discover equations that incorporate new synthetic terms alongside known terms, requiring scientific as well as data-driven reasoning rather than reliance on memorization. The LSR-Synth dataset is motivated by the approach introduced in (Shojae et al., 2024b) for the handful of manually designed problems and systematically expands it into a comprehensive set of benchmark problems across diverse scientific domains. By combining known terms with synthetic, novel terms, LLMs are challenged to demonstrate discovery capabilities in unobserved contexts, yet leverage their knowledge in the process. 
The LSR-Synth dataset spans four scientific domains: chemistry, biology, physics, and material science, focusing on key scientific problems, including reaction kinetics in chemistry, population growth in biology, damped harmonic oscillators in physics, and stress-strain relationships in material science (examples in App. A.2)." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.885, + 0.889, + 0.901 + ], + "angle": 0, + "content": "The data generation process for LSR-Synth involves multi" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.086, + 0.477, + 0.737 + ], + "angle": 0, + "content": "ple steps , as illustrated in Fig. 3(b), to ensure the creation of high-quality, challenging benchmark problems: 1) Select Scientific Problem: We select problems from different scientific domains, such as reaction kinetics in chemistry or population dynamics in biology. 2) Known Term Generation: Given the problem description, we prompt an LLM (GPT-4o) to generate a list of common and well-known mathematical terms that typically appear in the underlying models. 3) Synthetic Term Generation: Similarly, we prompt the LLM to generate a list of diverse novel synthetic terms for a given scientific problem, along with descriptions of the problem and variables. 
For example, in chemistry reaction kinetics, known terms for reaction rate \\((dA / dt)\\) based on concentration \\((A)\\) and time \\((t)\\) might include first-order \\((-kA)\\) and second-order kinetics \\((-kA^2)\\) or the exponential decay term \\(-k\\exp (-k_st)\\), while synthetic terms could represent non-linear high-order saturation, e.g., \\(kA^2 /(1 + \\beta A^4)\\), or non-linear quantum tunneling effects, e.g., \\(kA\\exp (-\\frac{\\gamma}{t}) / t^2\\). 4) Solvability Check: After sampling from the generated known and synthetic terms and combining them into a complete mathematical expression, we verify the solvability of these expressions using numerical solvers such as solve_ivp in Python. This step ensures that the expressions are feasible, providing a basis for generating datapoints. 5) Novelty Check: In the context of each scientific problem and the complete expression, we evaluate the novelty of the new generated task using LLM (GPT-4o) as a novelty evaluator. This step is to verify that the synthetic terms are novel in the provided context and require data-driven reasoning rather than relying on established knowledge to be discovered. 6) Databe point Generation: For expressions that pass the solvability and novelty checks, we generate datapoints using numerical solvers based on the specified initial conditions and parameters. These datapoints are used to create the final task datasets. 7) Expert Validation: Finally, the filtered expressions, along with visualizations of their generated datapoints, are cross-checked by two subject matter experts to validate their plausibility. After these filtering steps, we finalize a candidate list of 128 problems across the four domains (36: chemistry; 24: biology; 43: physics; and 25: material science). More detailed analysis of LLM-SRBench datasets are provided in App. A." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.751, + 0.196, + 0.765 + ], + "angle": 0, + "content": "2.3. 
Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.774, + 0.476, + 0.895 + ], + "angle": 0, + "content": "Evaluating LLM-based scientific equation discovery methods introduces unique challenges due to the open-ended nature of the task and diverse symbolic representation of hypotheses. A discovered equation can be assessed from two perspectives: (a) data fidelity, which measures how well the equation fits the observed and out-of-domain (OOD) data, and (b) symbolic accuracy, which evaluates the alignment with ground-truth symbolic equation hypotheses. Both" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.886, + 0.116 + ], + "angle": 0, + "content": "perspectives are critical, as equations may exhibit similar symbolic forms but differ numerically, or vice versa." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.121, + 0.887, + 0.228 + ], + "angle": 0, + "content": "Data Fidelity. We evaluate data-driven fidelity using two known metrics in equation discovery: (1) Accuracy to tolerance \\(\\tau\\) (\\(\\mathrm{Acc}_{\\tau}\\)) (Kamienny et al., 2022; Biggio et al., 2021), and Normalized Mean Squared Error (NMSE). These metrics are computed on both in-domain test data and OOD data (when available) to assess generalization capacity, a crucial requirement for scientific equations." 
+ }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.242, + 0.823, + 0.275 + ], + "angle": 0, + "content": "\\[\n\\operatorname {A c c} _ {\\tau} = \\mathbb {1} \\left(\\max _ {1 \\leq i \\leq N _ {\\text {t e s t}}} \\left| \\frac {\\hat {y} _ {i} - y _ {i}}{y _ {i}} \\right| \\leq \\tau\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.631, + 0.278, + 0.822, + 0.317 + ], + "angle": 0, + "content": "\\[\n\\mathrm {N M S E} = \\frac {\\sum_ {i = 1} ^ {N _ {\\mathrm {t e s t}}} (\\hat {y} _ {i} - y _ {i}) ^ {2}}{\\sum_ {i = 1} ^ {N _ {\\mathrm {t e s t}}} (y _ {i} - \\bar {y}) ^ {2}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.324, + 0.888, + 0.64 + ], + "angle": 0, + "content": "Symbolic Accuracy. We evaluate symbolic accuracy with a model-based evaluation strategy using GPT-4o as an evaluator (prompt in App. B, Fig. 11). This approach addresses the limitations of current symbolic metrics like recovery rate in symbolic regression (La Cava et al., 2016), which are very sensitive to exact symbolic matches and fail to account for mathematical equivalence, particularly in different hypothesis representations (e.g., equation as strings, expression trees, or Python programs). Here, GPT-4o evaluates mathematical equivalence by comparing the symbolic form of the predicted hypothesis versus the ground-truth equation after removing parameters and constants. The ability of LLMs to recognize semantic equivalence across different representations makes them particularly well-suited for evaluating LLM-based equation discovery methods, which often operate within a more diverse and open-ended hypothesis space. To validate this metric, two authors also independently evaluated symbolic equivalence on 130 sampled problems, finding \\(94.6\\%\\) agreement between GPT-4o and human evaluators. App. B provides more details on the evaluation metrics." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.66, + 0.63, + 0.677 + ], + "angle": 0, + "content": "3. 
Experiments" + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.686, + 0.671, + 0.702 + ], + "angle": 0, + "content": "3.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.71, + 0.887, + 0.906 + ], + "angle": 0, + "content": "We benchmark state-of-the-art LLM-based scientific equation discovery methods using three LLM backbones: one open-source model (Llama-3.1-8B-Instruct) and two proprietary models (GPT-4o-mini and GPT-3.5-turbo). Each discovery task takes as input the problem description, variables, the corresponding dataset, and an instruction specifying the task. The discovery methods then generate and refine equation hypotheses through LLMs. To ensure fair comparison, we standardize each of the methods to use 1k LLM calls per problem while maintaining their core algorithmic designs and hyperparameter settings. Detailed implementation specifics and prompts of each method are provided in App. C. We" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.094, + 0.888, + 0.137 + ], + "angle": 0, + "content": "Table 1. Comparison of different LLM-based scientific equation discovery methods on LLM-SRBench. Performance metrics include symbolic accuracy (SA), numeric precision \\((\\mathrm{Acc}_{0.1})\\), and normalized mean squared error (NMSE). Bold values indicate best performance within each method, and underlined values show best overall performance across discovery methods." + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.144, + 0.887, + 0.43 + ], + "angle": 0, + "content": "
ModelsLSR-TransformLSR-Synth
ChemistryBiologyPhysicsMaterial Science
SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓
Direct Prompting (DataBlind)
Llama-3.1-8B-Instruct3.611.8010.36970.00.00.06440.00.00.54810.00.00.04590.00.00.0826
GPT-3.5-turbo2.101.8010.35530.08.330.00230.04.160.59900.02.270.02740.00.00.0277
GPT-4o-mini7.216.3060.26310.013.880.02210.04.160.46484.549.090.06470.00.00.0484
SGA (Ma et al., 2024)
Llama-3.1-8B-Instruct2.700.9090.35190.08.330.04580.00.00.24160.02.270.15490.012.120.0435
GPT-3.5-turbo0.00.9090.34650.08.330.00710.08.330.12792.274.540.02490.028.100.0019
GPT-4o-mini9.918.110.23210.016.665.46e-44.1612.510.01284.549.090.05110.036.116.02e-4
LaSR (Grayeli et al., 2024)
Llama-3.1-8B-Instruct5.4145.940.00210.027.772.77e-44.1616.662.73e-44.5425.020.00188.2164.227.44e-5
GPT-3.5-turbo12.6147.740.00150.038.891.51e-40.016.662.31e-46.8122.710.001120.6664.093.77e-5
GPT-4o-mini6.3150.450.00112.7738.929.11e-58.3320.831.53e-49.9131.819.94e-428.1272.049.23e-6
LLM-SR (Shojaece et al., 2024b)
Llama-3.1-8B-Instruct30.6338.550.01018.3366.668.01e-625.3058.331.04e-66.9734.091.23e-44.1088.121.15e-7
GPT-3.5-turbo10.8110.810.14490.050.222.87e-50.025.032.33e-50.025.128.84e-412.4282.142.75e-8
GPT-4o-mini31.5339.640.009111.1152.774.12e-616.6629.163.06e-69.9136.367.62e-520.2488.283.21e-9
" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.457, + 0.367, + 0.472 + ], + "angle": 0, + "content": "evaluate the following discovery methods:" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.478, + 0.475, + 0.554 + ], + "angle": 0, + "content": "LLM-SR (Shojace et al., 2024b), a program search equation discovery method that generates hypotheses of equation skeleton as Python functions with the main idea of combining LLMs' scientific knowledge with multi-island evolutionary search guided by feedback from data." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.56, + 0.476, + 0.651 + ], + "angle": 0, + "content": "LaSR (Grayeli et al., 2024), a concept learning equation discovery method that finds abstract textual concepts of mathematical relations from successful equation hypotheses with LLMs and uses these concepts to evolve new hypotheses through a hybrid approach of evolutionary search (with PySR (Cranmer, 2023)) and LLM-guided search." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.657, + 0.476, + 0.732 + ], + "angle": 0, + "content": "SGA (Ma et al., 2024), a bilevel optimization equation discovery method that iteratively combines LLMs for discrete hypothesis generation of scientific laws and physical simulations in PyTorch for continuous parameter optimization with respect to data." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.738, + 0.476, + 0.815 + ], + "angle": 0, + "content": "Direct Prompting (DataBlind) serves as a baseline for generating hypotheses purely from contextual information without access to data. By not using data-driven reasoning and refinement in the hypothesis generation, this baseline helps to assess LLMs' memorization of the problem." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.831, + 0.214, + 0.845 + ], + "angle": 0, + "content": "3.2. 
Main Results" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.854, + 0.476, + 0.9 + ], + "angle": 0, + "content": "Our experimental results (Table 1) reveals several key insights into the strengths and limitations of LLM-based scientific equation discovery methods. Overall, performance" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.456, + 0.689, + 0.603 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.456, + 0.886, + 0.603 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.619, + 0.887, + 0.689 + ], + "angle": 0, + "content": "Figure 4. Performance comparison across equation complexity levels for Feynman and LSR-Transform datasets: (a) symbolic accuracy and (b) numeric precision \\((\\mathrm{Acc}_{0.1})\\) showing considerable performance gap between these two datasets at same complexity levels (averaged over all method-LLM pairs)." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.725, + 0.889, + 0.906 + ], + "angle": 0, + "content": "remains relatively low across both symbolic and numeric metrics, underscoring the fundamental challenges of this task. One key observation is the poor performance of direct prompting method (DataBlind), which only relies on LLMs' knowledge about the problem without access to data for data-driven refinement. This result underscores the necessity of combining LLM reasoning with observational data, as relying solely on prior knowledge proves insufficient for accurate equation discovery across different problems in LLM-SRBench. 
We observe that on LSR-Transform data group, LaSR achieves the highest numerical accuracy, leading in both \\(\\mathrm{Acc}_{0.1}\\) and NMSE, while LLM-SR with GPT" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "image", + "bbox": [ + 0.111, + 0.086, + 0.868, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.29, + 0.888, + 0.32 + ], + "angle": 0, + "content": "Figure 5. Detailed results of in-domain (ID) and out-of-domain (OOD) performance using Normalized Mean Squared Error across various LSR-Synth scientific domains and LLM-based equation discovery methods (with GPT-4o-mini as LLM backbone)." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.343, + 0.477, + 0.449 + ], + "angle": 0, + "content": "4o-mini outperforms other methods in symbolic accuracy \\((\\sim 31\\%)\\). This comparative advantage inverts in the LSR-Synth material science problems, where LaSR consistently yields better symbolic accuracy and LLM-SR achieves better numerical precision, suggesting that different equation discovery strategies may be better suited to different problems." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.457, + 0.478, + 0.637 + ], + "angle": 0, + "content": "Another notable observation is the consistent outperformance of models using GPT-4o-mini and Llama-3.1-8B compared to those based on GPT-3.5-turbo. This may be due to improved reasoning architectures or better effectiveness of smaller, less opinionated models in the search and exploration needed for navigating space of possible equations. 
The lower performance on LSR-Synth compared to LSR-Transform tasks also indicates that the ability to find transformed variants of known problems does not necessarily extend to more challenging scenarios involving novel synthetic terms, where systematic data-driven exploration becomes essential." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.648, + 0.182, + 0.663 + ], + "angle": 0, + "content": "3.3. Analysis" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.671, + 0.478, + 0.899 + ], + "angle": 0, + "content": "LSR-Transform vs. Feynman datasets. We analyze the performance gap between Feynman and LSR-Transform datasets across different equation complexity levels, measured by the number of nodes in the corresponding expression tree (La Cava et al., 2021). Fig. 4 shows the aggregated average performance (over all methods and LLM backbones) in terms of both symbolic accuracy (a) and numeric precision (b). It can be observed that even at the same complexity levels, LSR-Transform problems are substantially more challenging for current discovery methods than original Feynman problems. Also, this performance disparity persists even for simpler problems ([0-15] nodes), indicating that the challenging nature of LSR-Transform problems for LLM-based scientific equation discovery methods is not necessarily due to the structural complexity." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.343, + 0.889, + 0.766 + ], + "angle": 0, + "content": "Performance on In-domain vs. OOD. Generalization to unseen data is a fundamental requirement for scientific laws and a critical aspect of equation discovery. A correct mathematical model of observations should not only fit observed data but also extrapolate accurately to out-of-domain (OOD) scenarios. However, current equation discovery benchmarks largely overlook this aspect. In this work, we advocate for explicit OOD assessment in scientific equation discovery by introducing held-out OOD test sets in our benchmark. 
To systematically evaluate generalization beyond observed data, we generate dedicated OOD test sets for synthetic problems in the LSR-Synth category (see App. A for details on data generation). Fig. 5 provides a comparative analysis of ID vs. OOD results. As expected, all discovery methods exhibit higher NMSE in OOD settings, indicating degraded generalization compared to in-domain data. Among the evaluated methods, LLM-SR achieves the lowest NMSE across both ID and OOD settings, while direct prompting performs the worst. Also, we observe some domain-specific variations in generalization performance: the performance gap between ID and OOD is more pronounced in chemistry and biology problems compared to physics and material science, although the complexity of problems are designed to be similar, as shown in Fig. 10. This suggests that different scientific problems may pose distinct challenges for equation discovery methods, highlighting the need for future research to develop more robust approaches for different scientific disciplines." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.773, + 0.889, + 0.895 + ], + "angle": 0, + "content": "OD generalization and symbolic accuracy. We further analyzed the correlation between our proposed symbolic accuracy metric (Sec. 2.3) and data-driven extrapolation performance in OOD settings (averaged over all LSR-Synth domains). As shown in Fig. 6, symbolic accuracy exhibits a strong positive correlation with numerical precision \\((\\mathrm{Acc}_{0.1})\\) on OOD data and a corresponding negative correlation with numerical error (NMSE). 
This strong correlation observed" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.923, + 0.492, + 0.935 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.083, + 0.293, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.083, + 0.472, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.24, + 0.477, + 0.325 + ], + "angle": 0, + "content": "Figure 6. Correlation between symbolic accuracy and OOD performance across different equation discovery methods and LLM backbones: (a) symbolic accuracy vs. \\(\\mathrm{Acc}_{0.1}\\) showing positive correlation; (b) symbolic accuracy vs. normalized mean squared error showing negative correlation. Results are averaged over all LSR-Synth datasets." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.358, + 0.475, + 0.465 + ], + "angle": 0, + "content": "between symbolic and OOD performance provides two key insights: First, it establishes OOD evaluation as a powerful approach for assessing the discovery of generalizable equations—an aspect often underutilized in symbolic regression research; second, it validates our LLM-based symbolic evaluation approach through its strong alignment with numeric generalization performance." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.471, + 0.477, + 0.532 + ], + "angle": 0, + "content": "More detailed experimental results, including both qualitative analyses of discovered equations and quantitative performance comparisons across scientific equation discovery methods and LLMs, are provided in App. D." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.545, + 0.228, + 0.56 + ], + "angle": 0, + "content": "4. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.566, + 0.475, + 0.763 + ], + "angle": 0, + "content": "AI for Scientific Discovery. Recent advancements in AI for science highlight the ability of LLMs to generate scientific hypotheses by leveraging their extensive knowledge and reasoning capabilities (Lu et al., 2024; Ji et al., 2024; Reddy & Shojaee, 2024). LLM agents, when augmented with external tools and scientific simulators, have shown promise in automated scientific data-driven analysis (Majumder et al., 2024a). While recent benchmarks have been developed to evaluate LLMs and agents in hypothesis generation and scientific question answering (Majumder et al., 2024b; Chen et al., 2024), evaluation for equation discovery and symbolic regression—one of the core tasks in scientific discovery—remains yet unexplored." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.77, + 0.476, + 0.906 + ], + "angle": 0, + "content": "Symbolic Regression. Symbolic regression approaches fall into three main categories: search-based methods that explore equation spaces via evolutionary algorithms or reinforcement learning (Schmidt & Lipson, 2009; Cranmer, 2023; Petersen et al., 2021; Sun et al., 2023), learning-based methods leveraging pre-trained Transformers on synthetic data (Biggio et al., 2021; Kamienny et al., 2022), and hybrid approaches that guide search using neural priors (Landajuela et al., 2022; Shojaee et al., 2024a; Mundhenk et al., 2021;" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.085, + 0.886, + 0.148 + ], + "angle": 0, + "content": "Meidani et al., 2023). While these methods have advanced the field of automated symbolic function discovery from data, they mostly lack mechanisms to incorporate scientific domain knowledge into the discovery process." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.153, + 0.888, + 0.547 + ], + "angle": 0, + "content": "LLMs for Equation Discovery. 
Recent work has leveraged LLM-based symbolic regression to enhance scientific equation discovery through various approaches leveraging LLMs' knowledge. LLM-SR (Shojaee et al., 2024b) utilizes LLMs' embedded scientific knowledge to generate initial equation hypotheses in the form of Python programming functions, which are then refined through adaptive mutation and crossover operations with LLMs as evolutionary optimizers. In-Context Symbolic Regression (ICSR) (Merler et al., 2024) employs an iterative few-shot learning paradigm over expression candidates, using previously tested successful expressions along with their fitness scores to guide the generation of improved candidates. LaSR (Grayeli et al., 2024) alternates between hypothesis evolution, concept abstraction, and concept iteration phases to build a learned library of scientific concepts for mathematical relations needed to find the equation for a given data. The learned concepts are then used with pure evolutionary search methods (Cranmer, 2023) like PySR (Cranmer, 2023) as well as LLM-guided search to guide the equation hypothesis evolution. Scientific Generative Agent (SGA) (Ma et al., 2024) also implements a bilevel optimization framework for equation discovery where LLMs iteratively propose discrete hypotheses for scientific laws while physical simulations in PyTorch provide experimental validation and data-driven parameter optimization." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.553, + 0.889, + 0.902 + ], + "angle": 0, + "content": "Symbolic Regression Benchmarks. Symbolic regression benchmarks can be broadly categorized into scientific discovery-oriented and general-purpose mathematical discovery collections. The scientific equation discovery benchmarks are primarily represented by the SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022) benchmarks. 
SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu & Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is presented in SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for datapoints. The second category includes benchmarks like the Nguyen collection (Uy et al., 2011) and SRBench's black-box regression problems (La Cava et al., 2016) which include datasets without scientific contexts. However, these existing benchmarks are not well-suited for evaluating LLM-based equation discovery methods. These general-purpose benchmarks focus on the data-driven discovery of abstract mathematical functions without scientific context, while the former scientific benchmarks consist of well-known equations likely memorized by LLMs, enabling success through recitation rather than" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.072 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.475, + 0.162 + ], + "angle": 0, + "content": "scientific reasoning and discovery. Our work extends this line of research by focusing on scientific equation discovery with LLMs, designing the first comprehensive benchmark to assess discovery capabilities of LLM-based scientific equation discovery methods beyond memorization." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.18, + 0.206, + 0.196 + ], + "angle": 0, + "content": "5. 
Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.206, + 0.477, + 0.493 + ], + "angle": 0, + "content": "We introduce LLM-SRBench, the first comprehensive benchmark for LLM-driven scientific equation discovery, encompassing 239 tasks across two distinct categories: LSR-Transform (111 problems derived from transformations of established physical models) and LSR-Synth (128 novel synthetic problems spanning four scientific disciplines). Our benchmark provides a standardized and multi-faceted evaluation protocol for assessing scientific equation discovery with LLMs, accommodating diverse hypothesis representations, including expression strings and programs. Extensive experiments with state-of-the-art discovery methods and various LLM backbones on LLM-SRBench show a peak performance of only \\(31\\%\\), highlighting the significant challenges and open research opportunities in this domain. We envision that LLM-SRBench benchmark datasets and its evaluation protocol could serve as a foundation for future research, driving progress in automated equation discovery and advancing our understanding of LLMs in symbolic reasoning needed in scientific discovery." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.513, + 0.241, + 0.53 + ], + "angle": 0, + "content": "Impact Statement" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.539, + 0.476, + 0.63 + ], + "angle": 0, + "content": "The development and future adoption of LLM-SRBench as a benchmark for evaluating LLM-based scientific equation discovery has the potential to significantly impact the field of artificial intelligence for science and scientific discovery. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here." 
+ }, + { + "type": "title", + "bbox": [ + 0.087, + 0.646, + 0.22, + 0.661 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.669, + 0.475, + 0.7 + ], + "angle": 0, + "content": "This research was partially supported by the U.S. National Science Foundation (NSF) under Grant No. 2416728." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.719, + 0.183, + 0.735 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.743, + 0.476, + 0.833 + ], + "angle": 0, + "content": "Biggio, L., Bendinelli, T., Neitz, A., Lucchi, A., and Paras-candolo, G. Neural symbolic regression that scales. In Meila, M. and Zhang, T. (eds.), Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pp. 936-945. PMLR, 18-24 Jul 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.861, + 0.476, + 0.907 + ], + "angle": 0, + "content": "Chen, Z., Chen, S., Ning, Y., Zhang, Q., Wang, B., Yu, B., Li, Y., Liao, Z., Wei, C., Lu, Z., et al. Scienceagentbench: Toward rigorous assessment of language" + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.743, + 0.476, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.085, + 0.886, + 0.115 + ], + "angle": 0, + "content": "agents for data-driven scientific discovery. arXiv preprint arXiv:2410.05080, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.128, + 0.886, + 0.172 + ], + "angle": 0, + "content": "Cranmer, M. Interpretable machine learning for science with pysr and symbolicregression. jl. arXiv preprint arXiv:2305.01582, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.186, + 0.886, + 0.231 + ], + "angle": 0, + "content": "Du, M., Chen, Y., Wang, Z., Nie, L., and Zhang, D. Large language models for automatic equation discovery of nonlinear dynamics. Physics of Fluids, 36(9), 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.244, + 0.886, + 0.289 + ], + "angle": 0, + "content": "Grayeli, A., Sehgal, A., Costilla-Reyes, O., Cranmer, M., and Chaudhuri, S. Symbolic regression with a learned concept library. arXiv preprint arXiv:2409.09359, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.301, + 0.886, + 0.392 + ], + "angle": 0, + "content": "Ji, H., Wang, Q., Downey, D., and Hope, T. Scimon: Scientific inspiration machines optimized for novelty. In ACL Anthology: Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 279-299. University of Illinois Urbana-Champaign/CABBI, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.405, + 0.886, + 0.464 + ], + "angle": 0, + "content": "Kamienny, P.-A., d'Ascoli, S., Lample, G., and Charton, F. End-to-end symbolic regression with transformers. In Advances in Neural Information Processing Systems, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.478, + 0.886, + 0.583 + ], + "angle": 0, + "content": "La Cava, W., Danai, K., and Spector, L. Inference of compact nonlinear dynamic models by epigenetic local search. Engineering Applications of Artificial Intelligence, 55:292-306, 2016. ISSN 0952-1976. doi: https://doi.org/10.1016/j.engappai.2016.07.004. URL https://www.sciencedirect.com/science/article/pii/S0952197616301294." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.596, + 0.886, + 0.686 + ], + "angle": 0, + "content": "La Cava, W., Orzechowski, P., Burlacu, B., de Franca, F., Virgolin, M., Jin, Y., Kommenda, M., and Moore, J. Contemporary symbolic regression methods and their relative performance. In Vanschoren, J. and Yeung, S. (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.699, + 0.886, + 0.789 + ], + "angle": 0, + "content": "Landajuela, M., Lee, C., Yang, J., Glatt, R., Santiago, C. P., Aravena, I., Mundhenk, T. N., Mulcahy, G., and Petersen, B. K. A unified framework for deep symbolic regression. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K. (eds.), Advances in Neural Information Processing Systems, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.803, + 0.886, + 0.833 + ], + "angle": 0, + "content": "Langley, P. Data-driven discovery of physical laws. Cognitive Science, 5(1):31-54, 1981." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.846, + 0.886, + 0.905 + ], + "angle": 0, + "content": "Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J., and Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292, 2024." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.886, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.072 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.177 + ], + "angle": 0, + "content": "Ma, P., Wang, T.-H., Guo, M., Sun, Z., Tenenbaum, J. B., Rus, D., Gan, C., and Matusik, W. LLM and simulation as bilevel optimizers: A new paradigm to advance physical scientific discovery. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=hz8cFsdz7P." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.187, + 0.479, + 0.247 + ], + "angle": 0, + "content": "Majumder, B. P., Surana, H., Agarwal, D., Hazra, S., Sabharwal, A., and Clark, P. Data-driven discovery with large generative models. 
arXiv preprint arXiv:2402.13610, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.258, + 0.478, + 0.333 + ], + "angle": 0, + "content": "Majumder, B. P., Surana, H., Agarwal, D., Mishra, B. D., Meena, A., Prakhar, A., Vora, T., Khot, T., Sabharwal, A., and Clark, P. Discoverybench: Towards data-driven discovery with large language models. arXiv preprint arXiv:2407.01725, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.344, + 0.478, + 0.404 + ], + "angle": 0, + "content": "Matsubara, Y., Chiba, N., Igarashi, R., Tatsunori, T., and Ushiku, Y. Rethinking symbolic regression datasets and benchmarks for scientific discovery. arXiv preprint arXiv:2206.10540, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.415, + 0.478, + 0.49 + ], + "angle": 0, + "content": "Meidani, K., Shojaee, P., Reddy, C. K., and Farimani, A. B. Snip: Bridging mathematical symbolic and numeric realms with unified pre-training. In The Twelfth International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.501, + 0.478, + 0.592 + ], + "angle": 0, + "content": "Merler, M., Haitsiukevich, K., Dainese, N., and Marttinen, P. In-context symbolic regression: Leveraging large language models for function discovery. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop), pp. 589-606, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.602, + 0.478, + 0.663 + ], + "angle": 0, + "content": "Mirzadeh, I., Alizadeh, K., Shahrokhi, H., Tuzel, O., Bengio, S., and Farajtabar, M. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.673, + 0.478, + 0.764 + ], + "angle": 0, + "content": "Mundhenk, T. N., Landajuela, M., Glatt, R., Santiago, C. P., faissol, D., and Petersen, B. K. 
Symbolic regression via deep reinforcement learning enhanced genetic programming seeding. In Beygelzimer, A., Dauphin, Y., Liang, P., and Vaughan, J. W. (eds.), Advances in Neural Information Processing Systems, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.774, + 0.478, + 0.851 + ], + "angle": 0, + "content": "Petersen, B. K., Larma, M. L., Mundhenk, T. N., Santiago, C. P., Kim, S. K., and Kim, J. T. Deep symbolic regression: Recovering mathematical expressions from data via risk-seeking policy gradients. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.861, + 0.478, + 0.906 + ], + "angle": 0, + "content": "Reddy, C. K. and Shojaee, P. Towards scientific discovery with generative ai: Progress, opportunities, and challenges. arXiv preprint arXiv:2412.11427, 2024." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.886, + 0.145 + ], + "angle": 0, + "content": "Schmidt, M. and Lipson, H. Distilling free-form natural laws from experimental data. Science Advance, 324 (5923):81-85, 2009. ISSN 0036-8075. doi: 10.1126/science.1165893." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.156, + 0.887, + 0.215 + ], + "angle": 0, + "content": "Shojae, P., Meidani, K., Barati Farimani, A., and Reddy, C. Transformer-based planning for symbolic regression. Advances in Neural Information Processing Systems, 36, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.226, + 0.887, + 0.286 + ], + "angle": 0, + "content": "Shojaee, P., Meidani, K., Gupta, S., Farimani, A. B., and Reddy, C. K. Llm-sr: Scientific equation discovery via programming with large language models. arXiv preprint arXiv:2404.18400, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.297, + 0.887, + 0.342 + ], + "angle": 0, + "content": "Strogatz, S. H. 
Nonlinear dynamics and chaos with student solutions manual: With applications to physics, biology, chemistry, and engineering. CRC press, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.352, + 0.887, + 0.413 + ], + "angle": 0, + "content": "Sun, F., Liu, Y., Wang, J.-X., and Sun, H. Symbolic physics learner: Discovering governing equations via monte carlo tree search. In The Eleventh International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.422, + 0.887, + 0.483 + ], + "angle": 0, + "content": "Udrescu, S.-M. and Tegmark, M. Ai feynman: A physics-inspired method for symbolic regression. Science Advances, 6(16):eaay2631, 2020. doi: 10.1126/sciadv.aay2631." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.493, + 0.887, + 0.568 + ], + "angle": 0, + "content": "Uy, N. Q., Hoai, N. X., O'Neill, M., McKay, R. I., and Galván-López, E. Semantically-based crossover in genetic programming: application to real-valued symbolic regression. Genetic Programming and Evolvable Machines, 12:91-119, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.578, + 0.887, + 0.623 + ], + "angle": 0, + "content": "Virgolin, M. and Pissis, S. P. Symbolic regression is NP-hard. Transactions on Machine Learning Research, 2022. ISSN 2835-8856." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.633, + 0.887, + 0.709 + ], + "angle": 0, + "content": "Wu, Z., Qiu, L., Ross, A., Akyurek, E., Chen, B., Wang, B., Kim, N., Andreas, J., and Kim, Y. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. arXiv preprint arXiv:2307.02477, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.719, + 0.887, + 0.78 + ], + "angle": 0, + "content": "Xie, C., Huang, Y., Zhang, C., Yu, D., Chen, X., Lin, B. Y., Li, B., Ghazi, B., and Kumar, R. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.79, + 0.887, + 0.851 + ], + "angle": 0, + "content": "Zhang, Y., Zheng, K., Liu, F., Zhang, Q., and Wang, Z. Autoturb: Using large language models for automatic algebraic model discovery of turbulence closure. arXiv preprint arXiv:2410.10657, 2024." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.887, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.084, + 0.173, + 0.101 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.111, + 0.241, + 0.126 + ], + "angle": 0, + "content": "A. Dataset Details" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.136, + 0.236, + 0.151 + ], + "angle": 0, + "content": "A.1. LSR-Transform" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.16, + 0.89, + 0.419 + ], + "angle": 0, + "content": "The LSR-Transform is the first category of datasets in LLM-SRBench, designed to evaluate the ability of LLM-based scientific equation discovery methods in less common mathematical forms. This dataset challenges LLM-based discovery methods to avoid reliance on memorization of well-known representations and instead reason through unfamiliar instantiations of familiar problems. This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024). 
By transforming existing benchmark problems into alternative mathematical representations, LSR-Transform provides a rigorous testbed to evaluate how well LLM-based discovery methods perform in both (1) semantic scientific reasoning, which draws on LLMs' built-in scientific knowledge, and (2) data-driven reasoning, which utilizes experimental feedback for equation discovery. LSR-Transform builds on the Feynman benchmark (Udrescu & Tegmark, 2020), a widely used standard benchmark in scientific equation discovery and symbolic regression. The Feynman benchmark consists of 100 physics equations from Feynman Lecture Series\\(^{1}\\), representing fundamental laws in physics. While the Feynman benchmark has been instrumental in evaluating symbolic regression methods, it primarily tests the ability to recover equations in their standard, well-known forms which are mostly memorized by LLMs. However, real-world scientific equation discovery often involves reasoning about unknown equations based on domain expertise and knowledge from literature as well as empirical data observations. To address this gap, LSR-Transform transforms the original Feynman equations into less common alternative mathematical forms of the same physical problem by switching input-output variables and symbolically solving for the new target variables." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.431, + 0.797, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.771, + 0.877, + 0.787 + ], + "angle": 0, + "content": "Figure 7. Examples of how LLM-SRBench (LSR-Transform) problems can be obtained from original Feynman benchmark problems." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.797, + 0.889, + 0.884 + ], + "angle": 0, + "content": "Figure 7 demonstrates the equation transformation process, showing examples of the original Feynman problems (along with their scientific descriptions) and their potential transformed versions. 
These examples show the dataset's design for altering the mathematical representation of the same problem by analytically solving the equations with respect to different input variables. For instance, the original harmonic oscillator energy equation \\( E = \\frac{1}{4} m(\\omega^2 + \\omega_0^2)x^2 \\) is transformed into symbolic representation of \\( m = \\frac{4E}{(\\omega^2 + \\omega_0^2)x^2} \\) and \\( \\omega = \\sqrt{\\frac{4E}{mx^2} - \\omega_0^2} \\) where the target variable is switched from energy \\( (E) \\)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.106, + 0.892, + 0.549, + 0.907 + ], + "angle": 0, + "content": " float: \"\"Evaluate the equation on data observations.\"\"\"" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.072 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "table_caption", + "bbox": [ + 0.233, + 0.094, + 0.741, + 0.108 + ], + "angle": 0, + "content": "Table 2. Implementation details of LLM-based scientific equation discovery methods." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.108, + 0.805, + 0.443 + ], + "angle": 0, + "content": "
MethodParameters
Direct Prompting (DataBlind)Temperature τ = 0.85 equation program hypotheses sampled from LLM for initial promptNo access to data for data-driven refinementTime limit T = 30s per program hypothesis execution,BFGS optimizer from Scipy for parameter optimization of equation skeletons
SGA (Ma et al., 2024)PyTorch-based implementation of model and torch nn. Module classMean square error loss for data-driven feedback in agentic searchAdam optimizer in PyTorch for differential parameter optimization of equation skeletons
LaSR (Grayeli et al., 2024)Iterations = 25Cycles per iteration = 550Populations = 10Population size = 33Maximum size = 30Operators: +, *, -, /, ∧, exp, log, sqrt, sin, cos, tan, coshLLM weights: llm_mutate =0.005, llm_crossover =0.005, llm_gen_random =0.005Top-K = 20 concepts from libraryDefault configuration of PySR for parameter optimization
LLM-SR (Shojaee et al., 2024b)Temperature τ = 0.8Batch size b = 4 equation programs per prompte = 4 parallel evaluatorsTime limit T = 30s per program hypothesis,Memory limit M = 2GBm = 10 islands for population diversity through searchk = 2 in-context examples per promptMaximum 10 parameters per equation skeletonBFGS optimizer from Scipy for parameter optimization of equation skeletons
" + }, + { + "type": "code", + "bbox": [ + 0.145, + 0.47, + 0.528, + 0.655 + ], + "angle": 0, + "content": "# Load data observations \ninputs, outputs = data['inputs'], data['outputs'] \nX = inputs \n# Optimize parameters based on data \nfrom scipy.optimize import minimize \ndef loss.params): \n y_pred = equation(*X, params) \n return np.mean((y_pred - outputs) ** 2) \nloss_partial = lambda params: loss.params) \nresult = minimize(loss_partial, [1.0]*MAX_NPARAMS, method='BFGS') \n# Return evaluation score \noptimized.params = result.x \nloss = result(fun \nif np.isnan(loss) or np.isinf(loss): \n return None \nelse: \n return -loss" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.672, + 0.536, + 0.687 + ], + "angle": 0, + "content": "3. Equation example specification as Python programming function." + }, + { + "type": "code", + "bbox": [ + 0.127, + 0.701, + 0.816, + 0.905 + ], + "angle": 0, + "content": "```python\ndef equation_v0(\\(INPUT VAR[0], ..., \\)INPUT VAR[N], params):\n ''' Mathematical function for {$OUTPUT VAR_DESC}\nArgs:\n $INPUT VAR[0]: A numpy array representing observations of {$INPUT VAR_DESC[0]}.\n ...\n $INPUT VAR[N]: A numpy array representing observations of {$INPUT VAR_DESC[N]}.\nparams: Array of numeric constants or parameters to be optimized\nReturn: A numpy array representing {$OUTPUT VAR_DESC} as the result of applying the mathematical function to the inputs.\n'''# Equation example 1 logic as function body\n...\ndef equation_v1(\\)INPUT VAR[0], ..., \\)INPUT VAR[N], params):\n # Equation example 2\n...\n## Function to be completed" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.495, + 0.935 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.058, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "code", + "bbox": [ + 0.127, + 0.089, + 0.488, + 0.109 + ], + 
"angle": 0, + "content": "def equation(\\(INPUT VAR[0], ..., \\)INPUT VAR[N], params):\n ''' Improvement version of equation_v0 and equation_v1'''" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.132, + 0.182, + 0.145 + ], + "angle": 0, + "content": "C.2.2. LASR" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.156, + 0.886, + 0.186 + ], + "angle": 0, + "content": "We use the default prompts from LaSR's (Grayeli et al., 2024) public code repository (https://github.com/trishullah/LibraryAugmentedSymbolicRegression.jl), which includes:" + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.205, + 0.675, + 0.22 + ], + "angle": 0, + "content": "1. The LLMINIT prompt, which is used in an LLM-augmented initialization operation." + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.23, + 0.672, + 0.246 + ], + "angle": 0, + "content": "2. LLMMUTATION prompt is used to mutate an expression based on a set of concepts." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.255, + 0.886, + 0.286 + ], + "angle": 0, + "content": "3. LLMCROSSOVER prompt is used to construct a new expression from the crossover of two sampled expressions based on a set of concepts." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.296, + 0.885, + 0.326 + ], + "angle": 0, + "content": "4. LLM Concept Abstraction prompt in CONCEPTABSTRACTION function, which extracts a natural language concept from current trends of hypotheses at each iteration." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.336, + 0.886, + 0.366 + ], + "angle": 0, + "content": "5. LLM Concept Evolution prompt in CONCEPTEVOLUTION function, which creates a new concept that follows a set of ideas in the current library." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.205, + 0.886, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.385, + 0.457, + 0.402 + ], + "angle": 0, + "content": "In the following, we provide examples of these prompts." 
+ }, + { + "type": "title", + "bbox": [ + 0.087, + 0.408, + 0.229, + 0.423 + ], + "angle": 0, + "content": "1. LLMINIT prompt." + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.429, + 0.829, + 0.595 + ], + "angle": 0, + "content": " \nYou are a helpful assistant that proposes a mathematical expression by following three provided suggestions. An expression must consist of the following variables: {{variables}}. All constants will be represented with the symbol C. Each expression will only use these operators: {{operators}}. \n \nSuggestion 1: {{assume1}} \nSuggestion 2: {{assume2}} \nSuggestion 3: {{assume3}} \nPropose {{N}} expressions that would be appropriate given the suggestions. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... \"expr{N}\"] \n]" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.614, + 0.276, + 0.63 + ], + "angle": 0, + "content": "2. LLMMUTATION prompt." + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.637, + 0.829, + 0.82 + ], + "angle": 0, + "content": " \nYou are a helpful assistant that mutates a mathematical expression by following a few provided suggestions. You will be given three suggestions and a single reference expression to mutate. \nAn expression must consist of the following variables: \\(\\{\\{variables\\}\\}\\) . All constants will be represented with the symbol C. Each expression will only use these operators: \\(\\{\\{\\mathrm{operators}\\}\\}\\) \n \nSuggestion 1:{\\{assume1\\}} \nSuggestion 2:{\\{assume2\\}} \nSuggestion 3:{\\{assume3\\}} \nReference Expression:{\\{expr\\}} \nPropose \\(\\{\\{\\mathbf{N}\\}\\}\\) expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... 
\"expr.{N}\"] \n]" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.839, + 0.285, + 0.854 + ], + "angle": 0, + "content": "3. LLMCROSSOVER prompt." + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.861, + 0.829, + 0.905 + ], + "angle": 0, + "content": " \nYou are a helpful assistant that recombines two mathematical expressions by following a few provided suggestions. You will be given three suggestions and two reference expressions to recombine. \nAn expression must consist of the following variables: \\(\\{\\{variables\\}\\}\\) . All constants will be represented with the symbol C. Each expression will only use these operators: \\(\\{\\{\\mathrm{operators}\\}\\}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.495, + 0.935 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.058, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.097, + 0.825, + 0.239 + ], + "angle": 0, + "content": " \nSuggestion 1: {{assume1}} \nSuggestion 2: {{assume2}} \nSuggestion 3: {{assume3}} \nReference Expression 1: {{expr1}} \nReference Expression 2: {{expr2}} \nPropose {{N}} expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... \"expr.{N}\" \n]" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.258, + 0.336, + 0.274 + ], + "angle": 0, + "content": "4. LLM Concept Abstraction prompt." + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.281, + 0.83, + 0.328 + ], + "angle": 0, + "content": " \nYou are a helpful assistant that hypothesizes about the underlying assumptions that generated a list of good and bad mathematical expressions in detailed ways. 
My ultimate goal is to discover what assumptions generated the observed good mathematical expressions and excludes the bad mathematical expressions. Focus more on the good expressions, their mathematical structure, and any relation to physical concepts. Note that capital C represents an arbitrary constant" + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.334, + 0.819, + 0.543 + ], + "angle": 0, + "content": " \nGood Expression 1: {gexpr1} \nGood Expression 2: {gexpr2} \nGood Expression 3: {gexpr3} \nGood Expression 4: {gexpr4} \nGood Expression 5: {gexpr5} \nBad Expression 1: {bexpr1} \nBad Expression 2: {bexpr2} \nBad Expression 3: {bexpr3} \nBad Expression 4: {bexpr4} \nBad Expression 5: {bexpr5} \nPropose \\(\\{\\{N\\}\\}\\) hypotheses that would be appropriate given the expressions. Provide short commentary for each of your decisions. Do not talk about topics related to the simplicity or complexity of the expressions. I want ideas that are unique and interesting enough to amaze the world's best mathematicians. End with a JSON list that enumerates the proposed hypotheses following this format: \n``'json \n[\"hyp1\", \"hyp2\", ... \"hyp.{N}]'' \n]" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.564, + 0.325, + 0.579 + ], + "angle": 0, + "content": "5. LLM Concept Evolution prompt." + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.588, + 0.836, + 0.625 + ], + "angle": 0, + "content": " You are an insightful assistant skilled in logical reasoning and deduction. Your task is to analyze a set of ideas and infer nontrivial conclusions that logically follow from them. The ultimate goal is to uncover underlying principles or properties of the hidden expressions. Focus on providing logical conclusions that are unique, interesting, and profound." 
+ }, + { + "type": "code", + "bbox": [ + 0.126, + 0.631, + 0.836, + 0.788 + ], + "angle": 0, + "content": " \nIdea 1:{ideal} \nIdea 2:{idea2} \nIdea 3:{idea3} \nIdea 4:{idea4} \nIdea 5:{idea5} \nBased on these ideas, deduce \\(\\{\\{N\\}\\}\\) logical conclusions or hypotheses that directly follow from them. Provide a brief explanation for each conclusion, highlighting the logical connections between the ideas. Avoid discussing topics related to the simplicity or complexity of the expressions. Conclude with a JSON list that enumerates the proposed conclusions in the following format: \n``'json \n[\"Conclusion 1\", \"Conclusion 2\", ... \"Conclusion {{N}}]\" \n]" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.815, + 0.175, + 0.829 + ], + "angle": 0, + "content": "C.2.3. SGA" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.839, + 0.889, + 0.882 + ], + "angle": 0, + "content": "The following prompts are used in our implementation of SGA (Ma et al., 2024) for scientific equation discovery tasks, following the original implementation SGA's public code repository (https://github.com/PingchuanMa/SGA), which includes:" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.892, + 0.248, + 0.906 + ], + "angle": 0, + "content": "System prompt for task." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.058, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.097, + 0.645, + 0.179 + ], + "angle": 0, + "content": "You are an intelligent AI assistant for coding and scientific equation discovery. \nYou are tasked with discovering mathematical function structures for scientific systems. \nFollow the user's requirements carefully and make sure you understand them. \nKeep your answers short and to the point. 
\nDo not provide any information that is not requested.. \nAlways document your code as comments to explain the reason behind them. \nUse Markdown to format your solution. \nYou are very familiar with Python and PyTorch. \nDo not use any external libraries other than the libraries used in the examples." + }, + { + "type": "code_caption", + "bbox": [ + 0.085, + 0.196, + 0.497, + 0.211 + ], + "angle": 0, + "content": "Code formatting prompt for scientific equation discovery task." + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.226, + 0.822, + 0.537 + ], + "angle": 0, + "content": "```python\n## PyTorch Tips\n1. When working with tensors, always use PyTorch's operators (such as 'torch.exp', 'torch.cos', 'torch.sqrt', ...) to ensure compatibility and optimal performance.\n2. In PyTorch, operator input arguments must be tensors, not floats.\n## Code Requirements\n1. The only library allowed is PyTorch. Follow the format provided by the user examples.\n2. Annotate the size of the tensor as comment after each tensor operation. For example, # (B, 3, 3).\n3. Separate the code into parameters that can be tuned with differentiable optimization and the symbolic expression represented by PyTorch code. Define them respectively in the\n5. The proposed code must strictly follow the structure and function signatures below:\n``'python\nimport torch\nimport torch(nn as nn)\nclass SymbolicEquation(nn.Module):\n def __init__(self, {PARAM_INPUTS}):\n Define trainable continuous parameters for differentiable optimization.\n Tentatively initialize the parameters with the default values in args.\n Args:\n {PARAM_DESCRIPTION}\n super().__init__()\n {PARAM_INIT}\n def forward(self, {INPUT_variables}) -> torch.Tensor:\n {FORWARD_FUNCTIONDescriptions}\n```" + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.545, + 0.831, + 0.59 + ], + "angle": 0, + "content": "1. Analyze step-by-step what the potential problem is in the previous iterations based on the feedback. 
Think about why the results from previous iterations mismatched with the ground truth. Do not give advice about how to optimize. Focus on the formulation of the scientific equation. Start this section with \"#Analysis\". Analyze all iterations individually, and start the subsection for each iteration with \"#Iteration N\", where N stands for the index. Remember to analyze every iteration in the history." + }, + { + "type": "code", + "bbox": [ + 0.125, + 0.598, + 0.836, + 0.643 + ], + "angle": 0, + "content": "2. Think step-by-step what you need to do in this iteration. Think about what is needed to improve performance. If the analysis suggests specific functional forms or constraints, think about how these will be incorporated into the symbolic equation. Think about how to separate your algorithm into a continuous parameter part and a symbolic expression model part. Describe your plan in pseudo-code, written out in great detail. Remember to update the default values of the trainable parameters based on previous optimizations. Start this section with \"# Step-by-Step Plan\"." + }, + { + "type": "code", + "bbox": [ + 0.125, + 0.65, + 0.825, + 0.669 + ], + "angle": 0, + "content": "3. Output the code in a single code block ''``python ... ''`` with detailed comments in the code block. Do not add any trailing comments before or after the code block. Start this section with \"# Code\"." + }, + { + "type": "list", + "bbox": [ + 0.125, + 0.545, + 0.836, + 0.669 + ], + "angle": 0, + "content": null + }, + { + "type": "code_caption", + "bbox": [ + 0.085, + 0.686, + 0.377, + 0.701 + ], + "angle": 0, + "content": "Context prompt for each scientific problem." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.717, + 0.196, + 0.726 + ], + "angle": 0, + "content": "# # # Context" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.735, + 0.836, + 0.789 + ], + "angle": 0, + "content": "The objective is to construct a mathematical expression that accurately maps input variables to a target output based on a provided dataset. The task involves filling in a code block to define a symbolic expression or model that minimizes the difference between predicted and ground-truth outputs. The code block defines a class with two functions: one for parameters within the expression and another for generating or modifying the symbolic structure of the expression. Feedback is provided in the form of metrics measuring the error between the model's predictions and the ground-truth values, as well as guidance on structural improvements to the symbolic expression." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.795, + 0.569, + 0.807 + ], + "angle": 0, + "content": "The expression represents \\(\\{\\) OUTPUT VAR DESC\\}, given data on \\(\\{\\) INPUTS DESC\\}." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.835, + 0.383, + 0.853 + ], + "angle": 0, + "content": "D. Additional Results and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.861, + 0.89, + 0.907 + ], + "angle": 0, + "content": "Detailed Numeric Accuracy Analysis. While Table 1 presents median Normalized Mean Squared Error for each method-LLM combination across LLM-SRBench datasets, Figure 12 provides a more comprehensive view of error distributions across all samples. 
These box plots illustrate performance variations across LLM-SRBench datasets from two perspectives:" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.497, + 0.935 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.085, + 0.888, + 0.177 + ], + "angle": 0, + "content": "comparing different equation discovery methods with GPT-4o-mini as the LLM backbone, and examining different LLM backbones when using LLM-SR method. The substantial variance in NMSE performance across samples reflects the diverse complexity inherent in our benchmark—stemming from both the varying mathematical transformations in LSR-Transform and the different combinations of known and synthetic terms in LSR-Synth datasets. Notably, the relative difficulty of datasets varies across methods and LLM backbones, suggesting that different methods and LLMs possess distinct capabilities in terms of leveraging domain knowledge, reasoning, and generating novel hypotheses." + }, + { + "type": "image", + "bbox": [ + 0.131, + 0.192, + 0.49, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.491, + 0.192, + 0.847, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.424, + 0.888, + 0.465 + ], + "angle": 0, + "content": "Figure 12. Normalized Mean Squared Error (NMSE) of discovered equations in various domains of LLM-SRBench with respect to (left) different equation discovery methods using GPT-4omini LLM backbone, and (right) different LLM backbones using LLM-SR method" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.489, + 0.888, + 0.596 + ], + "angle": 0, + "content": "Symbolic Accuracy and Generalization. 
For scientific equation discovery methods, both symbolic accuracy and out-of-domain generalization serve as crucial evaluation metrics, reflecting the methods' ability to uncover true governing equations. Figure 13 examines the relationship between these metrics, plotting symbolic accuracy against both OOD accuracy and OOD NMSE across all method-LLM-domain combinations in LSR-Synth. The strong correlation observed between symbolic and OOD performance yields two important insights: first, it establishes OOD evaluation as a powerful metric for assessing the discovery of generalizable equations, an approach historically underutilized in symbolic regression; second, it validates our LLM-based symbolic evaluation approach through its strong alignment with numeric generalization performance." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.612, + 0.888, + 0.749 + ], + "angle": 0, + "content": "Qualitative Analysis of Outputs. To provide deeper insights into the behavior of different discovery methods, Figure 14 illustrates their final discovered hypotheses on a biological population growth problem (BPG0) using Llama-3.1-8B as the LLM backbone. Direct Prompting (Figure 14(a)) generates equations that capture basic population dynamics, demonstrating LLMs' ability to propose scientifically plausible structures. SGA's solution (Figure 14(b)) successfully incorporates one of the common population growth terms while exploring additional structural components. LaSR (Figure 14(c)) discovers an equation structure that combines multiple interaction terms, though it differs from established scientific formulations. LLM-SR (Figure 14(d)) combines both standard population dynamics terms and synthetic components in its solution. These examples demonstrate the diverse approaches methods take in balancing scientific interpretability with mathematical expressiveness when discovering equation structures." 
+ }, + { + "type": "title", + "bbox": [ + 0.086, + 0.767, + 0.394, + 0.784 + ], + "angle": 0, + "content": "E. Discussion and Future Directions" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.792, + 0.888, + 0.884 + ], + "angle": 0, + "content": "Our findings from LLM-SRBench reveal several key insights that inform the design of future LLMs for scientific discovery applications. Scientific equation discovery remains a challenging problem for LLMs, requiring a complex interplay of domain knowledge, search capabilities with data-driven feedback, and mathematical manipulation skills. Our results demonstrate that this problem poses significant challenges for LLM-based discovery frameworks across different model architectures, suggesting that current approaches may be fundamentally limited in their ability to perform genuine scientific discovery." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.891, + 0.887, + 0.907 + ], + "angle": 0, + "content": "This work questions the current evaluation paradigm for equation discovery in emerging LLM-based techniques. We" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.085, + 0.845, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.155, + 0.348, + 0.812, + 0.362 + ], + "angle": 0, + "content": "Figure 13. Symbolic Accuracy versus OOD performance over all domains, methods, and backbone LLM pairs." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.39, + 0.889, + 0.689 + ], + "angle": 0, + "content": "demonstrate that existing benchmarks for this task are susceptible to memorization and inadequate for evaluating these techniques' true scientific discovery capabilities. Motivated by these limitations, we designed LLM-SRBench to address the memorization issue through two key innovations: synthetic imaginary scenarios (LSR-Synth category) that are not based on existing scientific knowledge and require data-driven discovery tools for solution, and transformed equations (LSR-Transform category) that convert common forms of scientifically known equations into less familiar formulations. The LSR-Synth category targets genuine innovation in LLM-based discovery techniques by eliminating the possibility of recalling memorized equations, while LSR-Transform problems are difficult to recite from memory and require reasoning over hypothesis generation steps, making them suitable candidates for evaluating recently emerging LLM-based scientific discovery agents. While the mathematical transformations in LSR-Transform are algebraically valid, their scientific meaningfulness varies considerably across contexts. Many transformations correspond to legitimate physics problems from the Feynman Lecture Series collection and represent alternative problem formulations with practical significance. For example, in the Harmonic Oscillator Energy problem, the original formulation \\( E = \\frac{1}{4} m(\\omega^2 + \\omega_0^2)x^2 \\) expresses energy as a function of system parameters, while the transformed version \\( m = \\frac{4E}{(\\omega^2 + \\omega_0^2)x^2} \\) determines the mass required for given energy storage. 
This transformation maintains scientific meaning by addressing the engineering question of what mass is needed to store a specific amount of energy in an oscillating system, and such inversions are common in engineering design problems where system parameters must be determined to achieve desired performance characteristics. Similarly, the Electric Potential problem transforms from \\( V_e = \\frac{1}{4\\pi\\epsilon}\\frac{p_d\\cos(\\theta)}{r^2} \\) (potential at a point due to a dipole) to \\( r = \\sqrt{\\frac{p_d\\cos(\\theta)}{4\\pi\\epsilon V_e}} \\) (distance for a given potential), addressing the practical question of determining measurement distances in electrostatic experiments or sensor design." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.695, + 0.889, + 0.907 + ], + "angle": 0, + "content": "However, not all transformations maintain clear physical interpretability. Some result in equations where the target variable appears in complex functional forms that may not correspond to natural physical questions, such as solving for angular frequency in oscillatory systems yielding expressions involving square roots of differences that lack intuitive physical meaning. Additionally, certain transformations may obscure natural causal relationships—transforming from “force causes acceleration” to “acceleration determines force” maintains mathematical validity but may not reflect underlying physical causality. The LSR-Transform category represents a deliberate balance between mathematical rigor and physical meaningfulness by constraining the complexity of transformed problems to match original problems, focusing on semantic rather than syntactic challenges in scientific equation discovery, while maintaining the original scientific context and variable meanings to ensure that underlying physics remains relevant even when mathematical formulation changes. 
The varying scientific meaningfulness of transformations reflects broader challenges in automated scientific discovery that warrant future investigation. Automated discovery systems must incorporate mechanisms to evaluate not only data-driven correctness but also scientific plausibility and interpretability of generated hypotheses, as mathematical validity alone is insufficient for meaningful scientific contribution. The most effective approach to scientific equation discovery likely involves close collaboration between AI systems, which excel at exploring vast hypothesis spaces, and human domain scientists, who can" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.086, + 0.888, + 0.193 + ], + "angle": 0, + "content": "assess scientific meaningfulness and guide discovery directions based on deep contextual understanding. Future equation discovery methods could improve by incorporating literature retrieval tools to build grounding foundations for scientific context and domain knowledge, helping to prioritize discoveries that are mathematically valid, data-consistent, novel, and scientifically meaningful. The field needs evaluation frameworks that assess not just mathematical correctness but also scientific novelty, interpretability, and practical applicability of discovered equations, moving beyond narrow accuracy metrics toward a more comprehensive understanding of what constitutes valuable scientific discovery in the age of LLMs with their vast scientific knowledge." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.21, + 0.7, + 0.228 + ], + "angle": 0, + "content": "F. 
Comparison with Standard (non-LLM) Symbolic Regression Baselines" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.236, + 0.888, + 0.374 + ], + "angle": 0, + "content": "To further validate the utility of LLM-SRBench and demonstrate the advantages of LLM-based approaches, we conducted additional experiments comparing LLM-based methods with traditional symbolic regression techniques that do not incorporate domain knowledge. We evaluated PySR (Cranmer, 2023), a state-of-the-art symbolic regression method based on genetic programming, on all LLM-SRBench datasets. PySR operates purely on numerical data points without access to the scientific context, variable descriptions, or domain knowledge that LLM-based methods can leverage in discovery process. We used PySR's default configuration with the same computational budget (equivalent number of evaluations) as the LLM-based methods to ensure fair comparison. Table 3 presents the performance comparison between the best-performing LLM-based method from Table 1 and PySR across all LLM-SRBench datasets. The results reveal several key insights about the complementary strengths and limitations of non-LLM versus LLM-based approaches in equation discovery." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.38, + 0.889, + 0.562 + ], + "angle": 0, + "content": "PySR demonstrates competitive and sometimes even better numerical accuracy \\((\\mathrm{Acc}_{0.1})\\) across all datasets. However, PySR consistently shows significantly lower symbolic accuracy, particularly struggling with non-physics domains where it achieves \\(0\\%\\) symbolic accuracy on chemistry, biology, and material science datasets. The performance gap is most pronounced in problems that require specialized scientific knowledge. While PySR can fit mathematical patterns in the data, it lacks the scientific intuition to discover equations that align with established physical principles or domain-specific terminology. 
Interestingly, PySR shows relatively better performance on physics problems, achieving modest symbolic accuracy of \\(4.54\\%\\) on LSR-Synth Physics and \\(8.11\\%\\) on LSR-Transform (which is based on Feynman physics equations). This suggests that physics problems may contain mathematical patterns that are more aligned with the dictionary design in PySR. So they can be discovered better through the data-driven search pipeline designed in PySR. These findings strengthen the motivation for LLM-based scientific equation discovery and demonstrate that LLM-SRBench successfully captures challenges in equation discovery that traditional symbolic regression methods cannot adequately address through numerical data-driven optimization alone." + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.585, + 0.887, + 0.613 + ], + "angle": 0, + "content": "Table 3. Performance comparison between LLM-based methods and state-of-the-art non-LLM symbolic regression baseline PySR on LLM-SRBench. SA = Symbolic Accuracy (%), Acc0.1 = Accuracy to tolerance 0.1 (%)." + }, + { + "type": "table", + "bbox": [ + 0.169, + 0.62, + 0.807, + 0.743 + ], + "angle": 0, + "content": "
Dataset (Metric)LLM-SR (best) SA / Acc0.1LaSR (best) SA / Acc0.1SGA (best) SA / Acc0.1PySR SA / Acc0.1
LSR-Transform31.53 / 39.6412.61 / 50.459.91 / 8.118.11 / 56.76
LSR-Synth Chemistry11.11 / 66.662.77 / 38.920 / 16.660 / 41.67
LSR-Synth Biology25.30 / 58.338.33 / 20.834.16 / 12.510 / 25.0
LSR-Synth Physics9.91 / 36.369.91 / 31.814.54 / 9.094.54 / 29.55
LSR-Synth Material Science20.24 / 88.2828.12 / 72.040 / 36.110 / 68.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.273, + 0.768, + 0.698, + 0.782 + ], + "angle": 0, + "content": "Table 4: LSR-Synth mathematical equations for each scientific domain." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.792, + 0.88, + 0.888 + ], + "angle": 0, + "content": "
DomainEquation IDEquation
ChemistryCKR1-kA(t)2+kzA(t)2/(βA(t)4+1)
CKR2-kA(t)2-kA(t)+kw cos(log(A(t)+1))
CKR3-kA(t)+kw cos(log(A(t)+1))
" + }, + { + "type": "text", + "bbox": [ + 0.708, + 0.892, + 0.869, + 0.907 + ], + "angle": 0, + "content": "Continued on next page" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "table_caption", + "bbox": [ + 0.356, + 0.089, + 0.619, + 0.104 + ], + "angle": 0, + "content": "Table 4 - continued from previous page" + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.106, + 0.879, + 0.88 + ], + "angle": 0, + "content": "
DomainEquation IDEquation
CKR4-kA(t)2-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR5-kA(t)2+kqA(t) log(γt+1)
CKR6-k√(A(t)+kfA(t)0.33
CKR7-kA(t) exp(-ks t)+km sin(√A(t))
CKR8-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR9-kA(t)2-kA(t)+kt sin(log(A(t)+1))
CKR10-k√A(t)+kw cos(log(A(t)+1))
CKR11-kA(t)2+kt sin(log(A(t)+1))
CKR12-kA(t)2+km sin(√A(t))
CKR13-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR14-kA(t)+kp sin(ωA(t))
CKR15-k√A(t)-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR16-k√A(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR17-kA(t)+kfA(t)0.33
CKR18-kA(t) exp(-ks t)+kfA(t)0.33
CKR19-kA(t)2+kp sin(ωA(t))
CKR20-kA(t)2-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR21-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR22-kA(t) exp(-ks t)+kqA(t) log(γt+1)
CKR23-kA(t)2-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR24-k√A(t)+kp sin(ωA(t))
CKR25-k√A(t)-kA(t)2+kfA(t)0.33
CKR26-kA(t)+kt sin(log(A(t)+1))
CKR27-kA(t)2-kA(t) exp(-ks t)+km sin(√A(t))
CKR28-kA(t)2-kA(t) exp(-ks t)+kfA(t)0.33
CKR29-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR30-kA(t)-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR31-kA(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR32-k√A(t)-kA(t)+kw cos(log(A(t)+1))
CKR33-kA(t)-kA(t) exp(-ks t)+kfA(t)0.33
CKR34-k√A(t)-kA(t)2+kt sin(log(A(t)+1))
CKR35-kA(t)2+kfA(t)0.33
CKR36-kA(t)+kqA(t)log(γt+1)
" + }, + { + "type": "text", + "bbox": [ + 0.709, + 0.884, + 0.869, + 0.899 + ], + "angle": 0, + "content": "Continued on next page" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.497, + 0.935 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "table_caption", + "bbox": [ + 0.356, + 0.089, + 0.619, + 0.104 + ], + "angle": 0, + "content": "Table 4 - continued from previous page" + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.106, + 0.879, + 0.873 + ], + "angle": 0, + "content": "
DomainEquation IDEquation
BiologyBPG1r(1-P(t)/K0)P(t)+rP(t)0.33
BPG2rP(t)exp(-γt)+rP(t)2/(αP(t)+1)
BPG3βP(t)sin(ωt)+rP(t)exp(-γt)
BPG4r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG5r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG6r(1-P(t)/K0)P(t)+rP(t)2/(αP(t)+1)
BPG7-QαP(t)+r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG8r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)0.33
BPG9r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG10r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG11rP(t)0.33+rP(t)
BPG12r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)exp(-γt)
BPG13βP(t)sin(ωt)+r(1-P(t)/K0)P(t)
BPG14r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG15r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG16rP(t)0.33+rP(t)exp(-γt)
BPG17r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG18r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33
BPG19βP(t)sin(ωt)+r(1-P(t)/K0)P(t)+rP(t)
BPG20r(1-P(t)/K0)P(t)+rP(t)/tα
BPG21r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG22r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)/tα
BPG23r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG24r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
PhysicsPO1F0sin(t)-βsin(v(t))-ω02x(t)3-ω02x(t)exp(-|x(t)|)
PO2F0sin(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO3-αv(t)3-μ(1-x(t)2)v(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO4F0sin(t)-βsin(v(t))-2βv(t)
PO5F0sin(t)-αv(t)3-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)
PO6-βsin(v(t))-2βv(t)-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)3-ω02x(t)
PO7-βlog(|v(t)|+1)-2βv(t)-ω02x(t)3
PO8-αv(t)3-β|v(t)|0.33-ω02x(t)3
" + }, + { + "type": "text", + "bbox": [ + 0.709, + 0.877, + 0.869, + 0.892 + ], + "angle": 0, + "content": "Continued on next page" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "table_caption", + "bbox": [ + 0.356, + 0.089, + 0.619, + 0.104 + ], + "angle": 0, + "content": "Table 4 - continued from previous page" + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.106, + 0.879, + 0.879 + ], + "angle": 0, + "content": "
DomainEquation IDEquation
PO9-β|v(t)|0.33 - ω02x(t)3
PO10F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)
PO11F0sin(t) - ω02(γt+1)x(t) - ω02x(t)3 - ω02x(t)
PO12-βsin(v(t)) - ω02(γt+1)x(t) - ω02x(t)3
PO13F0sin(t) - αv(t)3 - β|v(t)|0.33 - ω02(γt+1)x(t) - ω02x(t)
PO14F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO15F0sin(t) - βlog(|v(t)| + 1) - βsin(v(t)) - 2βv(t) - μ(1-x(t)2)v(t)
PO16F0sin(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t) - ω02x(t) exp(-|x(t)|)
PO17F0sin(t) - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO18F0sin(t) - βsin(x(t))v(t) - 2βv(t) - ω02x(t)
PO19-βsin(x(t))v(t) - ω02x(t)
PO20-2βv(t) - ω02x(t) exp(-|x(t)|)
PO21-αv(t)3 - β log(|v(t)| + 1) - 2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO22F0sin(t) - βsin(x(t))v(t)
PO23-2βv(t) - β exp(-|x(t)|)v(t) - μ(1-x(t)2)v(t) - ω02x(t)3
PO24F0sin(t) - βlog(|v(t)| + 1) - ω02x(t) exp(-|x(t)|)
PO25F0sin(t) - αv(t)3 - β log(|v(t)| + 1)
PO26F0sin(t) - βsin(v(t))
PO27F0sin(t) - βlog(|v(t)| + 1) - 2βv(t) - ω02x(t)3
PO28F0sin(t) - αv(t)3 - 2βv(t) - βexp(-|v(t)|)v(t)
PO29-2βv(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
PO30-μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)3
PO31-αv(t)3 - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO32-ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3
PO33F0sin(t) - αv(t)3 - βexp(-|v(t)|)v(t) - ω02x(t)3
PO34-2βv(t) - μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)
PO35-2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO36F0sin(t) - βsin(v(t)) - ω02(γ|v(t)|0.33 + 1)x(t)
PO37F0sin(t) - βexp(-|x(t)|)v(t)
PO38F0sin(t) - αv(t)3 - 2βv(t) - ω02(γt+1)x(t)
PO39-βsin(v(t)) - μ(1-x(t)2)v(t) - ω02x(t) exp(-|x(t)|)
PO40F0sin(t) - αv(t)3 - βexp(-|x(t)|)v(t) - μ(1-v(t)2)v(t)
PO41F0sin(t) - β|v(t)|0.33 - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
" + }, + { + "type": "text", + "bbox": [ + 0.709, + 0.884, + 0.868, + 0.898 + ], + "angle": 0, + "content": "Continued on next page" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.497, + 0.935 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "table_caption", + "bbox": [ + 0.356, + 0.089, + 0.618, + 0.104 + ], + "angle": 0, + "content": "Table 4 - continued from previous page" + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.106, + 0.879, + 0.766 + ], + "angle": 0, + "content": "
DomainEquation IDEquation
PO42-μ(1-x(t)2)v(t)-ω02x(t)exp(-|x(t)|)
PO43F0sin(t)-αv(t)3-βsin(x(t))v(t)-2βv(t)
PO44F0sin(t)-βsin(x(t))v(t)-2βv(t)-μ(1-x(t)2)v(t)-ω02x(t) exp(-|x(t)|)
MaterialMatSci1E0ε(-αT(T-T0)+1)-β(T-T0)+εMη(T-T0)
MatSci2Hε3+KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci3Hε3+η(T-T0)exp(-ε)
MatSci4Hε3+KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci5E0ε2+η(T-T0)log(ε+1)
MatSci6E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εMη(T-T0)
MatSci7E0ε(-αT(T-T0)+1)+εη(T-T0)2
MatSci8Hε3-β(T-T0)+η(T-T0)log(ε+1)
MatSci9E0ε(-αT(T-T0)+1)+εMη(T-T0)
MatSci10Hε3-β(T-T0)+ε3η(T-T0)
MatSci11Hε3+KεNexp(-Q/(RT))+εη(T-T0)2
MatSci12KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci13E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εηexp(-(T-T0)2)
MatSci14-β(T-T0)+εηexp(-(T-T0)2)
MatSci15-β(T-T0)+εMη(T-T0)
MatSci16E0ε(-αT(T-T0)+1)+εηexp(-(T-T0)2)
MatSci17E0ε2+εη(T-T0)2
MatSci18E0ε(-αT(T-T0)+1)-β(T-T0)+η(T-T0)log(ε+1)
MatSci19Hε3+η(T-T0)sin(ε)
MatSci20E0ε2-β(T-T0)+ε3η(T-T0)
MatSci21E0ε2+εηsin(T-T0)
MatSci22KεNexp(-Q/(RT))-β(T-T0)+η(T-T0)log(ε+1)
MatSci23E0ε(-αT(T-T0)+1)+Hε3+η(T-T0)sin(ε)
MatSci24KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci25E0ε2+E0ε(-αT(T-T0)+1)+η(T-T0)log(ε+1)
" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "code_caption", + "bbox": [ + 0.301, + 0.142, + 0.68, + 0.171 + ], + "angle": 0, + "content": "Ground Truth: \\(\\frac{dP}{dt} = 0.9540\\left(1 - \\frac{P}{96.9069}\\right)P + 0.9540P^{\\frac{1}{3}}\\)" + }, + { + "type": "code_caption", + "bbox": [ + 0.207, + 0.199, + 0.428, + 0.213 + ], + "angle": 0, + "content": "a) Predicted: Direct (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.194, + 0.226, + 0.784, + 0.427 + ], + "angle": 0, + "content": "def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray:\n ''' Mathematical function for Population growth rate\nArgs:\n t: A numpy array representing observations of Time.\n P: A numpy array representing observations of Population at time t.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs.\n''' output = (\n params[0] * P # A linear term based on population size\n + params[1] * P ** 2 # A squared term for birth or death ratio\n + params[2] * t + params[3] * P * t # Linear and linear cross term\n + params[4]\n)\nreturn output" + }, + { + "type": "code_caption", + "bbox": [ + 0.209, + 0.458, + 0.42, + 0.471 + ], + "angle": 0, + "content": "b) Predicted: SGA (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.188, + 0.486, + 0.789, + 0.848 + ], + "angle": 0, + "content": "class SymbolicEquation(nnModule):\n def __init__(self, r: float = 0.5, K: float = 100.0, time_scale: float = 0.05):\n Define trainable continuous parameters for differentiable optimization.\n Tentatively initialize the parameters with the 
default values in args.\n Params:\n r (float): growth rate.\n K (float): carrying capacity.\n time_scale (float): time-dependent coefficient.\n super().__init()\n self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) # (1,) \n self.K = nn_PARAMETER(torch.tensor(K, dtype=torch.float)) # (1,) \n self.time_scale = nn_PARAMETER(torch.tensor(time_scale, dtype=torch.float))\n # # (1,) \n # self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) \ndef forward(self, t: torch.Tensor, P: torch.Tensor) -> torch.Tensor:\n \"\"Mathematical function for Population growth rate\n Params:\n t: Time.\n P: Population at time t.\n Return:\n dP_dt: Population growth rate.\n \"\"\n # # (B,) \n dP_dt = self.r * torch.exp(-self.time_scale*t) * P * (1 - P / self.K) # (B,) \nreturn dP_dt" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.262, + 0.416, + 0.275 + ], + "angle": 0, + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.288, + 0.779, + 0.316 + ], + "angle": 0, + "content": "\\[\n\\frac {d P}{d t} = \\left(- 9 1 0. 3 1 - \\left(\\left(P \\cdot (P + \\sqrt {P})\\right) + \\log \\left((t + 4. 6 3 9 6) - (- 0. 6 6 0 9 5)\\right)\\right) \\cdot \\sin \\left(\\frac {- 0 . 9 5 5 7 2}{e ^ {t} - t}\\right)\\right) + 9 1 0. 
3\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.359, + 0.43, + 0.372 + ], + "angle": 0, + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.388, + 0.7, + 0.405 + ], + "angle": 0, + "content": "def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.409, + 0.772, + 0.43 + ], + "angle": 0, + "content": "Mathematical function for Population growth rate with a logistic growth model, decay, and periodicity." + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.441, + 0.242, + 0.449 + ], + "angle": 0, + "content": "Args:" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.451, + 0.573, + 0.46 + ], + "angle": 0, + "content": "t: A numpy array representing observations of Time." + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.462, + 0.679, + 0.471 + ], + "angle": 0, + "content": "P: A numpy array representing observations of Population at time t." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.472, + 0.786, + 0.491 + ], + "angle": 0, + "content": "params: Array of numeric constants or parameters to be optimized (k, M, r, a, b, A, w, tau)" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.502, + 0.261, + 0.51 + ], + "angle": 0, + "content": "Returns:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.512, + 0.761, + 0.532 + ], + "angle": 0, + "content": "A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs." 
+ }, + { + "type": "text", + "bbox": [ + 0.207, + 0.532, + 0.229, + 0.538 + ], + "angle": 0, + "content": "1 1" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.542, + 0.506, + 0.552 + ], + "angle": 0, + "content": "Apply transformations to inputs (if needed)" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.553, + 0.442, + 0.562 + ], + "angle": 0, + "content": "\\(t = t / np.\\max(t)\\) #Normalize time" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.563, + 0.482, + 0.573 + ], + "angle": 0, + "content": "P = P / np.max(P) #Normalize population" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.584, + 0.521, + 0.593 + ], + "angle": 0, + "content": "Compute the rates using logistic growth model" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.594, + 0.7, + 0.604 + ], + "angle": 0, + "content": "\\(\\mathrm{dP\\_dt} = \\mathrm{params}[0] * \\mathrm{P} * (1 - \\mathrm{P}) - \\mathrm{params}[1] * \\mathrm{P} - \\mathrm{params}[2] * \\mathrm{P}**\\mathrm{params}[3]\\)" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.615, + 0.422, + 0.624 + ], + "angle": 0, + "content": "Compute the periodicity factor" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.625, + 0.718, + 0.635 + ], + "angle": 0, + "content": "periodic_factor = params[4] * np.sin(param[5] * (t - params[6]) + params[7])" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.646, + 0.456, + 0.655 + ], + "angle": 0, + "content": "Combine growth rate and periodicity" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.656, + 0.528, + 0.666 + ], + "angle": 0, + "content": "\\(\\mathrm{dP\\_dt\\_with\\_periodicity} = \\mathrm{dP\\_dt} * \\mathrm{periodic\\_factor}\\)" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.677, + 0.402, + 0.687 + ], + "angle": 0, + "content": "return dP_dt_with_periodicity" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.713, + 0.885, + 0.741 + ], + "angle": 0, + "content": "Figure 14. 
Example of output hypotheses from different LLM scientific equation discovery methods for BPG0 problem in LSR-Synth biology domain." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.254, + 0.136, + 0.347, + 0.148 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.156, + 0.611, + 0.172 + ], + "angle": 0, + "content": "\\[\n0. 1 9 \\cdot A (t) + 0. 1 9 \\cdot \\cos (\\log (A (t) + 1))\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.199, + 0.427, + 0.213 + ], + "angle": 0, + "content": "a) Predicted: Direct (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.201, + 0.224, + 0.751, + 0.424 + ], + "angle": 0, + "content": "def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Rate of change of concentration (Growth, Reaction or Decay)\n Args:\n t: A numpy array representing observations of Time.\n A: A numpy array representing observations of Concentration at time t.\n params: Array of numeric constants or parameters to be optimized.\n - params[0]: k (Rate constant, e.g., 0.1 min^-1)\n - params[1]: C0 (initial concentration, e.g., 10) for decay or growth\n models\n Return:\n A numpy array representing Rate of change of concentration as the result of applying the mathematical function to the inputs.\n - equation implemented under assumption that k*A represents linear decay, or linear growth.\n output = params[0] * A + params[1] * params[2]\n return output" + }, + { + "type": "title", + "bbox": [ + 0.208, + 0.475, + 0.421, + 0.489 + ], + "angle": 0, + "content": "b) Predicted: SGA (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ 
+ 0.214, + 0.5, + 0.761, + 0.84 + ], + "angle": 0, + "content": "class SymbolicEquation(nnModule): def init(self,k:float \\(= 9.00\\mathrm{e} - 02\\) A_0:float \\(= 9.60\\mathrm{e} - 01\\) A_t:float \\(= 9.60\\mathrm{e} - 01\\) .. Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. Args: k(float):time-dependent reaction rate constant. A_0(float):initial concentration. A_t(float):time-dependent change in concentration. super().__init_() #(1,) self.k \\(\\equiv\\) nn.Parameters(torch.tensor(k,dtype \\(\\coloneqq\\) torch.float)) self.A_0 \\(\\equiv\\) nn.Parameterrtorch.tensor(A_0,dtype \\(\\coloneqq\\) torch.float) # (1,) self.A_t \\(\\equiv\\) nn.Parameterrtorch.tensor(A_t,dtype \\(\\coloneqq\\) torch.float) # (1,) def forward(self,t:torch.Tensor,A:torch.Tensor) -> torch.Tensor: \"\"Mathematical function for Rate of change of concentration in chemistry reaction kinetics Args: t:Time.#(B,1) A:Concentration at time t.#(B,1) Return: dA_dt:Rate of change of concentration in chemistry reaction kinetics. (# (B,1)" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.33, + 0.498, + 0.345 + ], + "angle": 0, + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + }, + { + "type": "equation", + "bbox": [ + 0.252, + 0.356, + 0.717, + 0.375 + ], + "angle": 0, + "content": "\\[\n\\left(- 0. 0 0 4 0 4 0 2 / \\left(1. 3 2 2 2 ^ {t}\\right)\\right) * \\left(\\left(A ^ {t}\\right) ^ {\\cos \\left(\\left(\\left(t + \\left(\\left(A / (A / A)\\right) - 0. 0 4 3 4 7 7\\right)\\right) ^ {A}\\right) - A\\right) + \\left(\\left(0. 7 5 6 2 9 ^ {1. 
2 0 2 8} t\\right)\\right)\\right)} \\left. \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.419, + 0.43, + 0.433 + ], + "angle": 0, + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.455, + 0.764, + 0.614 + ], + "angle": 0, + "content": "def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Rate of change of concentration in chemistry reaction kinetics\n Args:\n t: A numpy array representing observations of Time.\n A: A numpy array representing observations of Concentration at time t.\n params: Array of numeric constants or parameters to be optimized\n Return:\n A numpy array representing Rate of change of concentration in chemistry reaction kinetics as the result of applying the mathematical function to the inputs.\n output = params[0] * A + params[1] * A**2 + params[2] * A**3 + params[3] * t + params[4]\n return output" + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.638, + 0.886, + 0.666 + ], + "angle": 0, + "content": "Figure 15. Example of output hypotheses from different LLM scientific equation discovery methods for CKR2 problem in LSR-Synth chemistry domain." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.495, + 0.935 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "title", + "bbox": [ + 0.252, + 0.101, + 0.347, + 0.113 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.116, + 0.726, + 0.132 + ], + "angle": 0, + "content": "\\[\n3 1. 1 2 \\cdot \\varepsilon \\cdot (- 0. 0 5 \\cdot (T - 3 1 6. 1 9) + 1) - 0. 2 3 \\cdot (T - 3 1 6. 1 9) + \\varepsilon^ {0. 6 0} \\cdot 3. 6 3 \\cdot (T - 3 1 6. 
1 9)\n\\]" + }, + { + "type": "code_caption", + "bbox": [ + 0.205, + 0.165, + 0.425, + 0.179 + ], + "angle": 0, + "content": "a) Predicted: Direct (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.201, + 0.189, + 0.739, + 0.407 + ], + "angle": 0, + "content": "def equation(eppsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Stress\nArgs:\n epsilon: A numpy array representing observations of Strain.\n T: A numpy array representing observations of Temperature.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Stress as the result of applying the mathematical function to the inputs.\n \"\"\n reference_temperature = 0\n sigma_0 = params[0]\n alpha = params[1]\n n = params[2]\n linear_epsilon_term = params[3] * epsilon\n multiplicative_epsilon_term = params[4] * epsilon\n linear_temperature_term = params[5] * T\n output = sigma_0 * (1 + alpha * (T - reference_temperature)) * epsilon**\n linear_epsilon_term + multiplicative_epsilon_term + linear_temperature_term\n return output" + }, + { + "type": "code_caption", + "bbox": [ + 0.209, + 0.448, + 0.421, + 0.462 + ], + "angle": 0, + "content": "b) Predicted: SGA (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.214, + 0.484, + 0.734, + 0.878 + ], + "angle": 0, + "content": "class SymbolicEquation(nnModule): def__init__(self,c_0:float \\(= 9.83\\mathrm{e} - 01\\) ,c_1:float \\(= 9.86\\mathrm{e} - 01\\) ,c_2:float \\(= 9.83\\mathrm{e} - 01\\) a:float \\(= -3.73\\mathrm{e} - 03\\) ,b:float \\(= -1.11\\mathrm{e} - 02\\) ,d:float \\(= 2.63\\mathrm{e} - 02\\) : Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. \nArgs: c_0(float): coefficient. c_1(float): coefficient. c_2(float): coefficient. a(float): coefficient for quadratic term. b(float): coefficient for exponential term. 
d(float): coefficient for non-linear relationship between Strain and Temperature. super()._init_(self.c_0=nn.Parameters(torch.tensor(c_0,dtype \\(\\equiv\\) torch.float)) # (1,) self.c_1 = nn.Parameter(torch.tensor(c_1,dtype \\(\\equiv\\) torch.float)) # (1,) self.c_2 = nn.Parameter(torch.tensor(c_2,dtype \\(\\equiv\\) torch.float)) # (1,) self.a = nn.Parameter(torch.tensor(a,dtype \\(\\equiv\\) torch.float)) # (1,) self.b = nn.Parameter(torch.tensor(b,dtype \\(\\equiv\\) torch.float)) # (1,) self.d = nn.Parameter(torch.tensor(d,dtype \\(\\equiv\\) torch.float)) # (1,) \ndef forward(self,epsilon:torch.Tensor,T:torch.Tensor) -> torch.Tensor:\"\"\"Mathematical function for Stress \nArgs: epsilon:Strain.#(B,1) T:Temperature.#(B,1) \nReturn: sigma:Stress.#(B,1) \"\"\" # (B,1) sigma \\(=\\) self.c_0 \\(^+\\) self.c_1 \\* torch.exp(self.a \\* epsilon \\(^+\\) self.b \\*T)+ self.c_2 \\* torch.cos(self.d \\* epsilon \\*T) return sigma" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.282, + 0.325, + 0.5, + 0.339 + ], + "angle": 0, + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.347, + 0.73, + 0.374 + ], + "angle": 0, + "content": "\\[\n\\left(\\left(\\sqrt {\\epsilon} \\cdot (- 7 1 2. 3 6) + \\left(\\left(\\frac {\\sqrt {\\epsilon}}{1 . 3 7 9 2 ^ {\\epsilon}} \\cdot 2. 2 7 9 8\\right) \\cdot (T - \\epsilon) + \\epsilon\\right) + 6. 8 1 2 5\\right) \\cdot 1. 
5 0 7 6 ^ {\\sqrt {\\epsilon}}\\right) - \\sin (\\log (T))\n\\]" + }, + { + "type": "code_caption", + "bbox": [ + 0.197, + 0.409, + 0.431, + 0.423 + ], + "angle": 0, + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.201, + 0.443, + 0.767, + 0.613 + ], + "angle": 0, + "content": "def equation(epsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Stress\nArgs:\n epsilon: A numpy array representing observations of Strain.\n T: A numpy array representing observations of Temperature.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Stress as the result of applying the mathematical function to the inputs.\n>>> output = params[0] * (epsilon ** (1/params[4]) - 1) + \\\n params[1] * (epsilon ** (1/params[6]) - 1) + \\\n params[2] * (epsilon ** (1/params[8]) - 1) * np.exp.params[3] * epsilon) + \\\n params[5] * epsilon + params[6] * T + params[7]\nreturn output" + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.646, + 0.885, + 0.674 + ], + "angle": 0, + "content": "Figure 16. Example of output hypotheses from different LLM scientific equation discovery methods for MatSci0 problem in LSR-Synth material science domain." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.254, + 0.118, + 0.347, + 0.131 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "equation", + "bbox": [ + 0.292, + 0.135, + 0.69, + 0.155 + ], + "angle": 0, + "content": "\\[\nF _ {0} \\cdot \\sin (t) - \\beta \\cdot \\sin (v (t)) - \\omega_ {0} ^ {2} \\cdot x (t) ^ {3} - \\omega_ {0} ^ {2} \\cdot x (t) \\cdot e ^ {- | x (t) |}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.182, + 0.427, + 0.196 + ], + "angle": 0, + "content": "a) Predicted: Direct (Llama-3.1-8b)" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.207, + 0.721, + 0.227 + ], + "angle": 0, + "content": "def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray:" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.227, + 0.496, + 0.236 + ], + "angle": 0, + "content": "\"\"Mathematical function for Acceleration" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.247, + 0.256, + 0.256 + ], + "angle": 0, + "content": "Args:" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.257, + 0.668, + 0.266 + ], + "angle": 0, + "content": "x: A numpy array representing observations of Position at time t." + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.267, + 0.576, + 0.276 + ], + "angle": 0, + "content": "t: A numpy array representing observations of Time." + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.277, + 0.667, + 0.286 + ], + "angle": 0, + "content": "v: A numpy array representing observations of Velocity at time t." 
+ }, + { + "type": "text", + "bbox": [ + 0.248, + 0.287, + 0.663, + 0.296 + ], + "angle": 0, + "content": "params: Array of numeric constants or parameters to be optimized" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.307, + 0.268, + 0.315 + ], + "angle": 0, + "content": "Return:" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.316, + 0.695, + 0.335 + ], + "angle": 0, + "content": "A numpy array representing Acceleration as the result of applying the mathematical function to the inputs." + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.336, + 0.245, + 0.342 + ], + "angle": 0, + "content": "1" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.345, + 0.668, + 0.355 + ], + "angle": 0, + "content": "Since acceleration is Force/mass and Force is mass*velocity change," + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.356, + 0.424, + 0.366 + ], + "angle": 0, + "content": "we represent this equation as" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.367, + 0.767, + 0.384 + ], + "angle": 0, + "content": "a = dv/dt or in our representation format, given our position x it will correspond" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.385, + 0.76, + 0.405 + ], + "angle": 0, + "content": "a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions:" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.406, + 0.703, + 0.425 + ], + "angle": 0, + "content": "output = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + params[4]) + params[5]*v**2 + params[6]*t + params[7]*(x**2 + v)" + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.426, + 0.309, + 0.435 + ], + "angle": 0, + "content": "return output" + }, + { + "type": "title", + "bbox": [ + 0.208, + 0.464, + 0.422, + 0.478 + ], + "angle": 0, + "content": "b) Predicted: SGA (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.214, + 0.489, + 0.734, + 0.536 + ], + "angle": 0, + "content": "class SymbolicEquation(nnModule): def 
init(self,c_0: float \\(= 9.50\\mathrm{e} - 01\\) ,c_1: float \\(= 9.50\\mathrm{e} - 01\\) ,c_2: float \\(= 9.50\\mathrm{e} - 01\\) ,c_3: float \\(= 1.05\\mathrm{e} + 00\\) :" + }, + { + "type": "text", + "bbox": [ + 0.258, + 0.54, + 0.718, + 0.56 + ], + "angle": 0, + "content": "Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args." + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.57, + 0.294, + 0.578 + ], + "angle": 0, + "content": "Args:" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.579, + 0.448, + 0.588 + ], + "angle": 0, + "content": "c_0(float): coefficient." + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.589, + 0.446, + 0.597 + ], + "angle": 0, + "content": "c 1(float): coefficient." + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.598, + 0.448, + 0.606 + ], + "angle": 0, + "content": "c_2(float); coefficient." + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.608, + 0.448, + 0.617 + ], + "angle": 0, + "content": "c 3 (float): coefficient." 
+ }, + { + "type": "text", + "bbox": [ + 0.261, + 0.619, + 0.284, + 0.625 + ], + "angle": 0, + "content": "1 1" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.628, + 0.705, + 0.647 + ], + "angle": 0, + "content": "super().__init_(self.c 0 = nn.Parameter(torch.tensor(c 0, dtype=torch.float)) # (1,)" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.648, + 0.704, + 0.656 + ], + "angle": 0, + "content": "self.c1 = nn_PARAMETER(torch.tensor(c1, dtype=torch.float)) # (1,)" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.658, + 0.704, + 0.667 + ], + "angle": 0, + "content": "self.c_2 = nn_PARAMETER(torch:tensor(c_2, dtype=torch.float)) # (1,)" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.668, + 0.704, + 0.678 + ], + "angle": 0, + "content": "self.c_3 = nn_PARAMETER(torch.tensor(c_3, dtype=torch.float)) # (1,)" + }, + { + "type": "text", + "bbox": [ + 0.234, + 0.696, + 0.695, + 0.716 + ], + "angle": 0, + "content": "def forward(self, x: torch.Tensor, t: torch.Tensor, v: torch.Tensor) -> torch.Tensor:" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.717, + 0.752, + 0.726 + ], + "angle": 0, + "content": "\"\"Mathematical function for Acceleration in Nonl-linear Harmonic Oscillator" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.737, + 0.294, + 0.746 + ], + "angle": 0, + "content": "Args:" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.747, + 0.48, + 0.755 + ], + "angle": 0, + "content": "x: Position at time t. # (B,)" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.757, + 0.389, + 0.765 + ], + "angle": 0, + "content": "t: Time. # (B,)" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.767, + 0.48, + 0.776 + ], + "angle": 0, + "content": "v: Velocity at time t. # (B,)" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.787, + 0.306, + 0.795 + ], + "angle": 0, + "content": "Return:" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.796, + 0.692, + 0.805 + ], + "angle": 0, + "content": "dv dt: Acceleration in Nonl-linear Harmonic Oscillator. 
# (B,)" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.806, + 0.282, + 0.812 + ], + "angle": 0, + "content": "1 1" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.815, + 0.553, + 0.825 + ], + "angle": 0, + "content": "Non-linear relationship between x, t, and v" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.826, + 0.758, + 0.845 + ], + "angle": 0, + "content": "dv_dt = self.c_0 * torch.exp(-self.c_1 * x) + self.c_2 * torch.cos(self.c_3 * t + self.c_3 * x) # (B, )" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.846, + 0.341, + 0.855 + ], + "angle": 0, + "content": "return dv_dt" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.497, + 0.935 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.057, + 0.8, + 0.071 + ], + "angle": 0, + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.281, + 0.289, + 0.498, + 0.303 + ], + "angle": 0, + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.312, + 0.611, + 0.343 + ], + "angle": 0, + "content": "\\[\n\\left(\\frac {\\left((x + x) \\cdot \\left(\\left(\\frac {\\sqrt {x ^ {\\mathrm {l i n e s}} - \\frac {x}{\\sin x}}}{x}\\right) \\cdot \\sin (1 . 1 4 7 8 \\cdot t) - x\\right) \\cdot \\sin (x)\\right)}{1 . 7 0 5 2} - \\sin (0. 
0 0 3 2 8 2 7)\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.381, + 0.43, + 0.394 + ], + "angle": 0, + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + }, + { + "type": "code", + "bbox": [ + 0.201, + 0.413, + 0.721, + 0.442 + ], + "angle": 0, + "content": "def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Acceleration" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.453, + 0.255, + 0.461 + ], + "angle": 0, + "content": "Args:" + }, + { + "type": "code", + "bbox": [ + 0.246, + 0.464, + 0.667, + 0.503 + ], + "angle": 0, + "content": "x: A numpy array representing observations of Position at time t. \nt: A numpy array representing observations of Time. \nv: A numpy array representing observations of Velocity at time t. \nparams: Array of numeric constants or parameters to be optimized" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.512, + 0.267, + 0.52 + ], + "angle": 0, + "content": "Return:" + }, + { + "type": "code", + "bbox": [ + 0.202, + 0.522, + 0.778, + 0.641 + ], + "angle": 0, + "content": "A numpy array representing Acceleration as the result of applying the mathematical function to the inputs. \n```python\n```\n# Since acceleration is Force/mass and Force is mass*velocity change,\n# we represent this equation as\n# a = dv/dt or in our representation format, given our position x it will correspond to\n# a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions:\noutput = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + params[4])\n+ params[5]*v**2 + params[6]*t + params[7]*(x**2 + v)\nreturn output" + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.68, + 0.886, + 0.709 + ], + "angle": 0, + "content": "Figure 17. Example of output hypotheses from different LLM scientific equation discovery methods for PO0 problem in LSR-Synth physics domain." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "35" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_origin.pdf b/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0202c871fa003094dba3719977a5569f09bc3233 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/6118f0df-c806-4166-9486-ac165b1c4226_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eb6f1805a57b5ca2ddf6d44fc79b78cb4adce2326fd94210774bcc6f7eca134 +size 5023869 diff --git a/data/2025/2504_10xxx/2504.10415/full.md b/data/2025/2504_10xxx/2504.10415/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0b817b44d91f6534929e0cec66509ecf30572f04 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/full.md @@ -0,0 +1,1087 @@ +# LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models + +Parshin Shojaee* 1 Ngoc-Hieu Nguyen* 2 Kazem Meidani 34 Amir Barati Farimani 3 Khoa D Doan 2 Chandan K Reddy 1 + +Website: https://github.com/deep-symbolic-mathematics/llm-srbench + +# Abstract + +Scientific equation discovery has long been a cornerstone of scientific progress, enabling the derivation of laws governing natural phenomena. Recently, Large Language Models (LLMs) have gained interest for this task due to their potential to leverage embedded scientific knowledge for hypothesis generation. However, it is difficult to assess the true discovery capabilities of these methods because existing benchmarks often use well-known equations. This makes them vulnerable to memorization by LLMs and results in inflated performance metrics that do not reflect genuine discovery. 
In this paper, we introduce LLM-SRBench, a comprehensive benchmark with 239 challenging problems across four scientific domains specifically designed to evaluate LLM-based scientific equation discovery methods while preventing trivial memorization. Our benchmark comprises two main categories: LSR-Transform, which transforms common physical models into less common mathematical representations to test reasoning beyond memorized forms, and LSR-Synth, which introduces synthetic, discovery-driven problems requiring data-driven reasoning. Through extensive evaluation of several state-of-the-art methods, using both open and closed LLMs, we find that the best-performing system so far achieves only $31.5\%$ symbolic accuracy. These findings highlight the challenges of scientific equation discovery, positioning LLM-SRBench as a valuable resource for future research. + +![](images/29b608f1ad835a7b4b3cf0055c416cc691294f5fa75a973e1ef9650ca2acd5dc.jpg) +Figure 1. Error analysis comparing simple LLM sampling (Llama-3.1-8B) on 100 Feynman problems versus LLM-SRBench datasets (LSR-Transform and LSR-Synth). The sharp drops in numeric error curves and considerably lower symbolic error for Feynman problems suggest memorization rather than gradual discovery. + +# 1. Introduction + +Equation discovery, the process of uncovering symbolic mathematical expressions from observational data, has been a cornerstone of scientific advancement. This task, also known as symbolic regression (SR), goes beyond mere data-driven predictive modeling by seeking interpretable mathematical relations that reveal the underlying mechanisms of natural phenomena. When scientists derive mathematical equations from empirical data, they gain more than just predictive power – they obtain insights into fundamental physical principles, enable extrapolation beyond observed data, and facilitate knowledge transfer across scientific domains (Langley, 1981; Schmidt & Lipson, 2009). 
+ +Standard approaches to equation discovery have primarily relied on genetic programming (GP) and evolutionary algorithms (Cranmer, 2023; La Cava et al., 2021), which represent mathematical expressions as trees and navigate the vast space of possible equations through evolutionary search techniques. However, these methods face two fundamental challenges. First, the NP-hard nature of equation discovery (Virgolin & Pissis, 2022) makes their random mutation and crossover operations computationally prohibitive across + +![](images/94c039b1cedc695be2873c2dcb0437ad37c78b74f2f5c36f13aca7b21cf0c837.jpg) + +# Goal / Instruction + +- Discover the mathematical equation/law that describes [output variable] based on given [input features]. + +- Use domain-specific knowledge of [the scientific field] and provided data samples to find an equation that is scientifically valid and fits the data well. + +![](images/16fbc11c1ac7574b7d1a59cd72413c0b5c055e02f015f88a1e7a9732b7b601d7.jpg) + +# Scientific Context + +Problem description + +Variable names and descriptions +Example: + +Find an equation in the field of classical mechanics that describes the mass $(m)$ needed to store energy in an oscillating system, given physical input variables: mean stored energy $(E_{m})$ , driving frequency $(\omega)$ , natural frequency $(\omega_{n})$ and amplitude $(x)$ . + +![](images/dd4ace02bfdbbc327dd22c6ca51c918e6611e6e1a0a6dffb9ff8c4f2e067743e.jpg) +Figure 2. Overview of the LLM-based Scientific Equation Discovery. The benchmark tasks (left) combine scientific context with numerical data. The discovery process (middle) iteratively leverages LLM's scientific knowledge and data-driven reasoning to generate hypotheses for underlying equations. Discovered hypotheses, represented as equation strings, trees, or programs, are then evaluated (right) using multiple metrics including data fidelity, symbolic accuracy, and computational efficiency. + +Data + +
\( E_{\mathrm {n}} \)ω\( \omega_0 \)xm
4.71.22.31.51.2
3.42.72.73.10.1
i
2.81.53.61.40.4
+ +![](images/eaaa041fd9512203f4e34c4bec9c0a6c971fa1e475a5cc93922305ebde0dc5dc.jpg) + +# Typical Workflow + +![](images/b4248be90e578b449703e07ee0bf472829ee92a629bf290e7b43622f53874a51.jpg) + +LLM internal scientific knowledge + +![](images/2943312887403cbd3614ec26d86312c9ed3797876e4b24a507d8b8ccaf08be34.jpg) + +Reasoning and planning + +![](images/83fdb943052af6819deb8c9f9e526ad919dcda5736b655b45a9e19dc080f02a4.jpg) + +Programming + +![](images/a7c6d759afe9fd77afb493d4e3d1f4cb1fd3debd494f956b9c7e6f32ff6755e4.jpg) + +![](images/a2d3aa7e47027c5fc32383269784b60c4e30210c9a1ddb79ea5b1ae367be0d02.jpg) + +Parameter Optimization + +![](images/bb81ea4b5f5d5a4f975ff0466f5ed65eeae3a9584feb539e69d440e2f25d1471.jpg) + +Simulation + +![](images/17beb428c63f2aabd0dfe88d4ab8d11372d076979bffbc2f467162f6659a2623.jpg) + +Experiments + +![](images/4336f60ac23a31b7303fd2bcd302d1a07db5ad0fb5084d34cb24e039edff4412.jpg) + +Statistical Fit to Data + +![](images/b2f081c94fa3b127bfd848223e598ebab8dca7669779b99dbbb482707e9719d8.jpg) + +# Hypothesis + +- Discovered mathematical equation represented by expressions, trees, programs, etc. + +- Supporting explanations / reasoning + +$m = 4^{*}E n / (x^{**}2^{*}$ + +(omega\*\*2 + omega_0\*\*2)) + +![](images/e19abcc9793b69233bc2068d01085943b55800e6c5368956853e58e8c2357a1e.jpg) + +def equation(E_n, omega, omega_0, x, params): # Energy-mass ratio normalized by parameter numerator $=$ params[0] $\ast$ E n + +Combined frequency and amplitude scaling effects denominator $=$ omega++2 $\ast$ x++2 + omega_0**2 $\ast$ x**2 m $=$ numerator / denominator return m + +# Evaluation + +Data Fidelity: +In-Domain accuracy +- Out-of-Domain generalization + +- Symbolic Accuracy: + +Human expert/LLM evaluator +Scientific plausibility +Interpretability + +Computational Efficiency + +vast search spaces. 
Second, unlike human scientists who leverage their domain knowledge and expertise to guide hypothesis formation, these approaches are mostly purely data-driven, and isolated from existing scientific knowledge. These limitations have motivated researchers to develop methods that incorporate scientific domain knowledge into the equation discovery process. + +Large Language Models (LLMs) have recently emerged as a promising solution to these challenges, offering a new paradigm for scientific equation discovery. LLMs, trained on vast corpora of scientific literature, possess extensive embedded scientific knowledge. This has sparked significant interest in leveraging LLMs for scientific equation discovery, with several recent works demonstrating their potential (Shojae et al., 2024b; Ma et al., 2024; Grayeli et al., 2024; Merler et al., 2024; Du et al., 2024; Reddy & Shojaee, 2024; Zhang et al., 2024). These LLM-based approaches have shown to enhance the equation hypothesis generation process by incorporating scientific priors, guiding the exploration of equation search spaces more efficiently, and providing interpretable reasoning for the search process. + +Despite the promising potential of LLM-based equation discovery methods, their rigorous and robust evaluation still remains an open challenge. The current scientific equation discovery benchmarks are primarily represented by SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022). SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu + +& Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for data points. However, these benchmarks exhibit significant limitations for the evaluation of LLM-based methods. 
Their problems are mostly based on known physics equations from textbooks, which makes them often subject to memorization by LLMs. + +As noted by (Shojaaee et al., 2024b), LLMs frequently succeed on these common equation discovery benchmarks through simple recitation based on variable names and problem descriptions, rather than the actual process of data-driven discovery and reasoning. Our analysis (shown in Fig. 1) also confirms this finding - the sudden drop in the numeric error curve within the first few iterations and significantly lower symbolic error on Feynman problems indicate memorized solutions rather than a meaningful search towards discovery. To mitigate this issue, (Shojaaee et al., 2024b; Ma et al., 2024) have introduced a handful of five custom-crafted problems designed to prevent memorization by manually modifying known physical models. While these efforts represent a step forward, the small scale and limited diversity of these problem sets are insufficient to provide a comprehensive evaluation framework for emerging LLM-based methods in scientific equation discovery. A more robust and systematic benchmark is needed to enable standardized evaluation and foster the development of innovative methods in this emerging field. + +In this paper, we introduce LLM-SRBench, a new benchmark designed to rigorously evaluate the capabilities of LLM-based scientific equation discovery methods. LLM-SRBench addresses the limitations of existing benchmarks by constructing problem sets that avoid trivial recitation while leveraging the scientific priors embedded in LLMs, simulating conditions akin to scientific discovery. The benchmark is structured around two main categories of problems, each targeting distinct aspects of equation discovery. The first category focuses on transforming common scientific problems, such as those from the Feynman equations, into different mathematical representations of the same underlying physical problem. 
By symbolically altering input-output mappings and generating less common mathematical forms for the same problem, we challenge LLM-based equation discovery to go beyond memorization of the common forms. This approach is motivated by recent findings on the fragility of LLMs' reasoning capabilities to unfamiliar representations of otherwise familiar problems (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). The second category extends the approach introduced by (Shojae et al., 2024b), which combines known terms in the underlying equation with synthetic, novel terms to create problems that go beyond memorization and demand data-driven reasoning. We expand this idea into a comprehensive set of benchmark problems spanning diverse scientific domains. These problems incorporate carefully designed synthetic terms that are both novel and plausible. We further verify the solvability of the generated equations using numerical solvers, ensuring that the benchmark problems remain grounded in physical feasibility while presenting meaningful challenges for LLM-based discovery methods. + +LLM-SRBench comprises 111 problems in the first category (LSR-Transform), and 128 problems in the second category (LSR-Synth), spanning four scientific domains: chemistry (36), biology (24), physics (43), and material science (25). We comprehensively benchmark state-of-the-art LLM-based scientific equation discovery methods with several LLM backbones on these datasets. Our experiments reveal several key insights into the capabilities and limitations of current LLM-based scientific equation discovery methods. Results show that the best model can only solve $31.5\%$ of problems on LSR-Transform and $28.1\%$ on LSR-Synth. This underscores the challenging nature of the tasks in LLM-SRBench and highlights its potential as a critical evaluation foundation for future LLM-based scientific equation discovery methods. 
Overall, the contributions of this work are as follows: + +- We introduce LLM-SRBench, the first comprehensive benchmark with 239 challenging problems across various scientific domains, designed to evaluate LLM-based scientific equation discovery methods. + +- We propose a novel benchmark design through alternative mathematical representations (LSR-Transform) and synthetic, discovery-driven problems (LSR-Synth) to ensure rigorous evaluation of scientific reasoning and discovery capabilities beyond LLM memorization. +- Extensive experiments on state-of-the-art methods reveal performance peaks at $31\%$ , highlighting the benchmark's challenging nature and its potential for future research. + +# 2. LLM-SRBench + +We introduce LLM-SRBench, a novel benchmark designed to evaluate LLM-based methods for data-driven scientific equation discovery. As shown in Fig. 2, in this benchmark, a "data-driven scientific equation discovery" task is defined as follows: Given a task dataset $\mathcal{D}$ , the corresponding scientific context $\mathcal{C}$ , the objective is to derive a hypothesis $h$ that represents the underlying mathematical relations behind the data with high precision and scientific plausibility. This process resembles the iterative search and refinement undertaken by human scientists, where LLMs act as optimizers, proposing and refining hypotheses based on both scientific knowledge and empirical data. + +# 2.1. LSR-Transform + +This category is designed to evaluate whether LLM-based methods can discover equations in less common mathematical forms, avoiding reliance on memorization of well-known representations. This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). 
By transforming existing benchmark problems into different mathematical representations, we challenge LLMs' capabilities in data-driven scientific equation discovery and reasoning. + +We build on the Feynman (Udrescu & Tegmark, 2020) benchmark (current standard benchmark in scientific equation discovery), which consists of 100 physics equations, and systematically transform these equations into alternative mathematical forms (examples in App. A.1). As demonstrated in Fig. 3(a), the transformation process involves seven key steps: 1) Equation Collection: We gather the original mathematical expressions, along with their input and output variables, and scientific problem descriptions from the Feynman benchmark. 2) Select Pivot Variable: For each equation, we choose an input feature to become the new target variable. 3) Feature-Target Transformation: We transform the dataset by switching the roles of the selected input feature and the original target variable. 4) Symbolic Transformation: Using the SymPy library in Python on the parsed expressions, we solve each equation with respect to the selected input variable, treating it + +![](images/f71f46fe53f6fb3b1b827b582fcd45a5f92ea2842866c6ce5dcdfaf13dedb8d8.jpg) +(a) LSR-Transform + +![](images/4c620bd69fe9f710128e1e41b30a46b9f571f0d7ea8754a51132638fc907b0c9.jpg) +(b) LSR-Synth +Figure 3. Data generation pipelines for the two dataset categories in LLM-SRBench. (a) LSR-Transform converts Feynman problems into alternative mathematical forms through symbolic transformation and input-output role switching, and (b) LSR-Synth generates novel discovery-driven problems by combining known scientific terms in the underlying models with synthetic novel terms. Both pipelines include validation steps to ensure solvability and scientific plausibility. + +as the new output and the original output variable as an input in the transformed equation. 
5) Solvability Check: We retain only those transformations that are analytically solvable, ensuring the feasibility of the resulting equations. 6) Dataset Refinement: For the transformed equations with altered data domains (e.g., due to square roots or denominators), we filter the original Feynman dataset to ensure all data points fall within the valid domains of the new equations. 7) Problem Reformulation: Using LLM (GPT4o), we generate a new natural language specification for each transformed problem. During this data generation process, we constrain the transformed equations' complexity (measured by expression tree node count) to the range of original Feynman dataset distribution (full analysis in Fig. 8, App.A.1). This allows us to focus on the semantic aspects of discovery—specifically the interplay between reasoning and memorization of the mathematical forms—rather than conflating performance with the ability to handle syntactically complex and lengthy hypotheses. We also exclude transformed problems that LLM can solve through direct sampling without requiring access to data. + +This process yields 111 total transformed equations derived from the 100 original Feynman problems. Each transformed equation shares the same scientific context, problem description, and variables as its original counterpart but presents a less common mathematical form to be discovered. The goal of LSR-Transform is not to discover new equations but to evaluate whether LLM-based systems can validate discoveries from non-trivial, data-driven transformations of known + +equations. To support scientific knowledge-guided discovery, each task in LSR-Transform is supplemented with a natural language description of the scientific problem and dataset, including variable names and their meanings. 
These descriptions are absent in the original Feynman benchmark but they are needed for LLM-based scientific equation discovery methods to provide scientific context in prompts for knowledge-guided equation discovery by LLMs. + +# 2.2. LSR-Synth + +This category is designed to assess whether LLMs can discover equations that incorporate new synthetic terms alongside known terms, requiring scientific as well as data-driven reasoning rather than reliance on memorization. The LSR-Synth dataset is motivated by the approach introduced in (Shojaee et al., 2024b) for the handful of manually designed problems and systematically expands it into a comprehensive set of benchmark problems across diverse scientific domains. By combining known terms with synthetic, novel terms, LLMs are challenged to demonstrate discovery capabilities in unobserved contexts, yet leverage their knowledge in the process. The LSR-Synth dataset spans four scientific domains: chemistry, biology, physics, and material science, focusing on key scientific problems, including reaction kinetics in chemistry, population growth in biology, damped harmonic oscillators in physics, and stress-strain relationships in material science (examples in App. A.2). + +The data generation process for LSR-Synth involves multiple steps, as illustrated in Fig. 3(b), to ensure the creation of high-quality, challenging benchmark problems: 1) Select Scientific Problem: We select problems from different scientific domains, such as reaction kinetics in chemistry or population dynamics in biology. 2) Known Term Generation: Given the problem description, we prompt an LLM (GPT-4o) to generate a list of common and well-known mathematical terms that typically appear in the underlying models. 3) Synthetic Term Generation: Similarly, we prompt the LLM to generate a list of diverse novel synthetic terms for a given scientific problem, along with descriptions of the problem and variables. 
For example, in chemistry reaction kinetics, known terms for reaction rate $(dA / dt)$ based on concentration $(A)$ and time $(t)$ might include first-order $(-kA)$ and second-order kinetics $(-kA^2)$ or the exponential decay term $-k\exp (-k_st)$ , while synthetic terms could represent non-linear high-order saturation, e.g., $kA^2 /(1 + \beta A^4)$ , or non-linear quantum tunneling effects, e.g., $kA\exp (-\frac{\gamma}{t}) / t^2$ . 4) Solvability Check: After sampling from the generated known and synthetic terms and combining them into a complete mathematical expression, we verify the solvability of these expressions using numerical solvers such as solve_ivp in Python. This step ensures that the expressions are feasible, providing a basis for generating datapoints. 5) Novelty Check: In the context of each scientific problem and the complete expression, we evaluate the novelty of the new generated task using LLM (GPT-4o) as a novelty evaluator. This step is to verify that the synthetic terms are novel in the provided context and require data-driven reasoning rather than relying on established knowledge to be discovered. 6) Datapoint Generation: For expressions that pass the solvability and novelty checks, we generate datapoints using numerical solvers based on the specified initial conditions and parameters. These datapoints are used to create the final task datasets. 7) Expert Validation: Finally, the filtered expressions, along with visualizations of their generated datapoints, are cross-checked by two subject matter experts to validate their plausibility. After these filtering steps, we finalize a candidate list of 128 problems across the four domains (36: chemistry; 24: biology; 43: physics; and 25: material science). More detailed analysis of LLM-SRBench datasets is provided in App. A. + +# 2.3. 
Evaluation + +Evaluating LLM-based scientific equation discovery methods introduces unique challenges due to the open-ended nature of the task and diverse symbolic representation of hypotheses. A discovered equation can be assessed from two perspectives: (a) data fidelity, which measures how well the equation fits the observed and out-of-domain (OOD) data, and (b) symbolic accuracy, which evaluates the alignment with ground-truth symbolic equation hypotheses. Both + +perspectives are critical, as equations may exhibit similar symbolic forms but differ numerically, or vice versa. + +Data Fidelity. We evaluate data-driven fidelity using two known metrics in equation discovery: (1) Accuracy to tolerance $\tau$ ( $\mathrm{Acc}_{\tau}$ ) (Kamienny et al., 2022; Biggio et al., 2021), and (2) Normalized Mean Squared Error (NMSE). These metrics are computed on both in-domain test data and OOD data (when available) to assess generalization capacity, a crucial requirement for scientific equations. + +$$ +\operatorname{Acc}_{\tau} = \mathbb{1}\left(\max_{1 \leq i \leq N_{\text{test}}} \left| \frac{\hat{y}_{i} - y_{i}}{y_{i}} \right| \leq \tau\right), +$$ + +$$ +\mathrm{NMSE} = \frac{\sum_{i=1}^{N_{\text{test}}} (\hat{y}_{i} - y_{i})^{2}}{\sum_{i=1}^{N_{\text{test}}} (y_{i} - \bar{y})^{2}} +$$ + +Symbolic Accuracy. We evaluate symbolic accuracy with a model-based evaluation strategy using GPT-4o as an evaluator (prompt in App. B, Fig. 11). This approach addresses the limitations of current symbolic metrics like recovery rate in symbolic regression (La Cava et al., 2016), which are very sensitive to exact symbolic matches and fail to account for mathematical equivalence, particularly in different hypothesis representations (e.g., equation as strings, expression trees, or Python programs). 
Here, GPT-4o evaluates mathematical equivalence by comparing the symbolic form of the predicted hypothesis versus the ground-truth equation after removing parameters and constants. The ability of LLMs to recognize semantic equivalence across different representations makes them particularly well-suited for evaluating LLM-based equation discovery methods, which often operate within a more diverse and open-ended hypothesis space. To validate this metric, two authors also independently evaluated symbolic equivalence on 130 sampled problems, finding $94.6\%$ agreement between GPT-4o and human evaluators. App. B provides more details on the evaluation metrics. + +# 3. Experiments + +# 3.1. Experimental Setup + +We benchmark state-of-the-art LLM-based scientific equation discovery methods using three LLM backbones: one open-source model (Llama-3.1-8B-Instruct) and two proprietary models (GPT-4o-mini and GPT-3.5-turbo). Each discovery task takes as input the problem description, variables, the corresponding dataset, and an instruction specifying the task. The discovery methods then generate and refine equation hypotheses through LLMs. To ensure fair comparison, we standardize each of the methods to use 1k LLM calls per problem while maintaining their core algorithmic designs and hyperparameter settings. Detailed implementation specifics and prompts of each method are provided in App. C. We + +Table 1. Comparison of different LLM-based scientific equation discovery methods on LLM-SRBench. Performance metrics include symbolic accuracy (SA), numeric precision $(\mathrm{Acc}_{0.1})$ , and normalized mean squared error (NMSE). Bold values indicate best performance within each method, and underlined values show best overall performance across discovery methods. + +
ModelsLSR-TransformLSR-Synth
ChemistryBiologyPhysicsMaterial Science
SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓
Direct Prompting (DataBlind)
Llama-3.1-8B-Instruct3.611.8010.36970.00.00.06440.00.00.54810.00.00.04590.00.00.0826
GPT-3.5-turbo2.101.8010.35530.08.330.00230.04.160.59900.02.270.02740.00.00.0277
GPT-4o-mini7.216.3060.26310.013.880.02210.04.160.46484.549.090.06470.00.00.0484
SGA (Ma et al., 2024)
Llama-3.1-8B-Instruct2.700.9090.35190.08.330.04580.00.00.24160.02.270.15490.012.120.0435
GPT-3.5-turbo0.00.9090.34650.08.330.00710.08.330.12792.274.540.02490.028.100.0019
GPT-4o-mini9.918.110.23210.016.665.46e-44.1612.510.01284.549.090.05110.036.116.02e-4
LaSR (Grayeli et al., 2024)
Llama-3.1-8B-Instruct5.4145.940.00210.027.772.77e-44.1616.662.73e-44.5425.020.00188.2164.227.44e-5
GPT-3.5-turbo12.6147.740.00150.038.891.51e-40.016.662.31e-46.8122.710.001120.6664.093.77e-5
GPT-4o-mini6.3150.450.00112.7738.929.11e-58.3320.831.53e-49.9131.819.94e-428.1272.049.23e-6
LLM-SR (Shojaee et al., 2024b)
Llama-3.1-8B-Instruct30.6338.550.01018.3366.668.01e-625.3058.331.04e-66.9734.091.23e-44.1088.121.15e-7
GPT-3.5-turbo10.8110.810.14490.050.222.87e-50.025.032.33e-50.025.128.84e-412.4282.142.75e-8
GPT-4o-mini31.5339.640.009111.1152.774.12e-616.6629.163.06e-69.9136.367.62e-520.2488.283.21e-9
+ +evaluate the following discovery methods: + +LLM-SR (Shojace et al., 2024b), a program search equation discovery method that generates hypotheses of equation skeleton as Python functions with the main idea of combining LLMs' scientific knowledge with multi-island evolutionary search guided by feedback from data. + +LaSR (Grayeli et al., 2024), a concept learning equation discovery method that finds abstract textual concepts of mathematical relations from successful equation hypotheses with LLMs and uses these concepts to evolve new hypotheses through a hybrid approach of evolutionary search (with PySR (Cranmer, 2023)) and LLM-guided search. + +SGA (Ma et al., 2024), a bilevel optimization equation discovery method that iteratively combines LLMs for discrete hypothesis generation of scientific laws and physical simulations in PyTorch for continuous parameter optimization with respect to data. + +Direct Prompting (DataBlind) serves as a baseline for generating hypotheses purely from contextual information without access to data. By not using data-driven reasoning and refinement in the hypothesis generation, this baseline helps to assess LLMs' memorization of the problem. + +# 3.2. Main Results + +Our experimental results (Table 1) reveals several key insights into the strengths and limitations of LLM-based scientific equation discovery methods. Overall, performance + +![](images/e6d31a599f2e56fc3e8bcf784ee3f05f71964409e21e6a4b52c86a09aa0d91e6.jpg) +Figure 4. Performance comparison across equation complexity levels for Feynman and LSR-Transform datasets: (a) symbolic accuracy and (b) numeric precision $(\mathrm{Acc}_{0.1})$ showing considerable performance gap between these two datasets at same complexity levels (averaged over all method-LLM pairs). + +![](images/396f55c66c8f2289a25537401b85bf1309aa6f76d7a756db809edb8483380734.jpg) + +remains relatively low across both symbolic and numeric metrics, underscoring the fundamental challenges of this task. 
One key observation is the poor performance of direct prompting method (DataBlind), which only relies on LLMs' knowledge about the problem without access to data for data-driven refinement. This result underscores the necessity of combining LLM reasoning with observational data, as relying solely on prior knowledge proves insufficient for accurate equation discovery across different problems in LLM-SRBench. We observe that on LSR-Transform data group, LaSR achieves the highest numerical accuracy, leading in both $\mathrm{Acc}_{0.1}$ and NMSE, while LLM-SR with GPT + +![](images/894c6bbe436e91e344a48f1b4afa6678514260a01ee7aec8ebbf71e5ed22ed03.jpg) +Figure 5. Detailed results of in-domain (ID) and out-of-domain (OOD) performance using Normalized Mean Squared Error across various LSR-Synth scientific domains and LLM-based equation discovery methods (with GPT-4o-mini as LLM backbone). + +4o-mini outperforms other methods in symbolic accuracy $(\sim 31\%)$ . This comparative advantage inverts in the LSR-Synth material science problems, where LaSR consistently yields better symbolic accuracy and LLM-SR achieves better numerical precision, suggesting that different equation discovery strategies may be better suited to different problems. + +Another notable observation is the consistent outperformance of models using GPT-4o-mini and Llama-3.1-8B compared to those based on GPT-3.5-turbo. This may be due to improved reasoning architectures or better effectiveness of smaller, less opinionated models in the search and exploration needed for navigating space of possible equations. The lower performance on LSR-Synth compared to LSR-Transform tasks also indicates that the ability to find transformed variants of known problems does not necessarily extend to more challenging scenarios involving novel synthetic terms, where systematic data-driven exploration becomes essential. + +# 3.3. Analysis + +LSR-Transform vs. Feynman datasets. 
We analyze the performance gap between Feynman and LSR-Transform datasets across different equation complexity levels, measured by the number of nodes in the corresponding expression tree (La Cava et al., 2021). Fig. 4 shows the aggregated average performance (over all methods and LLM backbones) in terms of both symbolic accuracy (a) and numeric precision (b). It can be observed that even at the same complexity levels, LSR-Transform problems are substantially more challenging for current discovery methods than original Feynman problems. Also, this performance disparity persists even for simpler problems ([0-15] nodes), indicating that the challenging nature of LSR-Transform problems for LLM-based scientific equation discovery methods is not necessarily due to the structural complexity. + +Performance on In-domain vs. OOD. Generalization to unseen data is a fundamental requirement for scientific laws and a critical aspect of equation discovery. A correct mathematical model of observations should not only fit observed data but also extrapolate accurately to out-of-domain (OOD) scenarios. However, current equation discovery benchmarks largely overlook this aspect. In this work, we advocate for explicit OOD assessment in scientific equation discovery by introducing held-out OOD test sets in our benchmark. To systematically evaluate generalization beyond observed data, we generate dedicated OOD test sets for synthetic problems in the LSR-Synth category (see App. A for details on data generation). Fig. 5 provides a comparative analysis of ID vs. OOD results. As expected, all discovery methods exhibit higher NMSE in OOD settings, indicating degraded generalization compared to in-domain data. Among the evaluated methods, LLM-SR achieves the lowest NMSE across both ID and OOD settings, while direct prompting performs the worst. 
Also, we observe some domain-specific variations in generalization performance: the performance gap between ID and OOD is more pronounced in chemistry and biology problems compared to physics and material science, although the complexity of problems is designed to be similar, as shown in Fig. 10. This suggests that different scientific problems may pose distinct challenges for equation discovery methods, highlighting the need for future research to develop more robust approaches for different scientific disciplines. + +OOD generalization and symbolic accuracy. We further analyzed the correlation between our proposed symbolic accuracy metric (Sec. 2.3) and data-driven extrapolation performance in OOD settings (averaged over all LSR-Synth domains). As shown in Fig. 6, symbolic accuracy exhibits a strong positive correlation with numerical precision $(\mathrm{Acc}_{0.1})$ on OOD data and a corresponding negative correlation with numerical error (NMSE). This strong correlation observed
+ +More detailed experimental results, including both qualitative analyses of discovered equations and quantitative performance comparisons across scientific equation discovery methods and LLMs, are provided in App. D. + +# 4. Related Work + +AI for Scientific Discovery. Recent advancements in AI for science highlight the ability of LLMs to generate scientific hypotheses by leveraging their extensive knowledge and reasoning capabilities (Lu et al., 2024; Ji et al., 2024; Reddy & Shojaee, 2024). LLM agents, when augmented with external tools and scientific simulators, have shown promise in automated scientific data-driven analysis (Majumder et al., 2024a). While recent benchmarks have been developed to evaluate LLMs and agents in hypothesis generation and scientific question answering (Majumder et al., 2024b; Chen et al., 2024), evaluation for equation discovery and symbolic regression—one of the core tasks in scientific discovery—remains yet unexplored. + +Symbolic Regression. Symbolic regression approaches fall into three main categories: search-based methods that explore equation spaces via evolutionary algorithms or reinforcement learning (Schmidt & Lipson, 2009; Cranmer, 2023; Petersen et al., 2021; Sun et al., 2023), learning-based methods leveraging pre-trained Transformers on synthetic data (Biggio et al., 2021; Kamienny et al., 2022), and hybrid approaches that guide search using neural priors (Landajuela et al., 2022; Shojaee et al., 2024a; Mundhenk et al., 2021; + +Meidani et al., 2023). While these methods have advanced the field of automated symbolic function discovery from data, they mostly lack mechanisms to incorporate scientific domain knowledge into the discovery process. + +LLMs for Equation Discovery. Recent work has leveraged LLM-based symbolic regression to enhance scientific equation discovery through various approaches leveraging LLMs' knowledge. 
LLM-SR (Shojaee et al., 2024b) utilizes LLMs' embedded scientific knowledge to generate initial equation hypotheses in the form of Python programming functions, which are then refined through adaptive mutation and crossover operations with LLMs as evolutionary optimizers. In-Context Symbolic Regression (ICSR) (Merler et al., 2024) employs an iterative few-shot learning paradigm over expression candidates, using previously tested successful expressions along with their fitness scores to guide the generation of improved candidates. LaSR (Grayeli et al., 2024) alternates between hypothesis evolution, concept abstraction, and concept iteration phases to build a learned library of scientific concepts for mathematical relations needed to find the equation for a given data. The learned concepts are then used with pure evolutionary search methods (Cranmer, 2023) like PySR (Cranmer, 2023) as well as LLM-guided search to guide the equation hypothesis evolution. Scientific Generative Agent (SGA) (Ma et al., 2024) also implements a bilevel optimization framework for equation discovery where LLMs iteratively propose discrete hypotheses for scientific laws while physical simulations in PyTorch provide experimental validation and data-driven parameter optimization. + +Symbolic Regression Benchmarks. Symbolic regression benchmarks can be broadly categorized into scientific discovery-oriented and general-purpose mathematical discovery collections. The scientific equation discovery benchmarks are primarily represented by the SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022) benchmarks. SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu & Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is presented in SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for datapoints. 
The second category includes benchmarks like the Nguyen collection (Uy et al., 2011) and SRBench's black-box regression problems (La Cava et al., 2016) which include datasets without scientific contexts. However, these existing benchmarks are not well-suited for evaluating LLM-based equation discovery methods. These general-purpose benchmarks focus on the data-driven discovery of abstract mathematical functions without scientific context, while the former scientific benchmarks consist of well-known equations likely memorized by LLMs, enabling success through recitation rather than + +scientific reasoning and discovery. Our work extends this line of research by focusing on scientific equation discovery with LLMs, designing the first comprehensive benchmark to assess discovery capabilities of LLM-based scientific equation discovery methods beyond memorization. + +# 5. Conclusion + +We introduce LLM-SRBench, the first comprehensive benchmark for LLM-driven scientific equation discovery, encompassing 239 tasks across two distinct categories: LSR-Transform (111 problems derived from transformations of established physical models) and LSR-Synth (128 novel synthetic problems spanning four scientific disciplines). Our benchmark provides a standardized and multi-faceted evaluation protocol for assessing scientific equation discovery with LLMs, accommodating diverse hypothesis representations, including expression strings and programs. Extensive experiments with state-of-the-art discovery methods and various LLM backbones on LLM-SRBench show a peak performance of only $31\%$ , highlighting the significant challenges and open research opportunities in this domain. We envision that LLM-SRBench benchmark datasets and its evaluation protocol could serve as a foundation for future research, driving progress in automated equation discovery and advancing our understanding of LLMs in symbolic reasoning needed in scientific discovery. 
+ +# Impact Statement + +The development and future adoption of LLM-SRBench as a benchmark for evaluating LLM-based scientific equation discovery has the potential to significantly impact the field of artificial intelligence for science and scientific discovery. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here. + +# Acknowledgments + +This research was partially supported by the U.S. National Science Foundation (NSF) under Grant No. 2416728. + +# References + +Biggio, L., Bendinelli, T., Neitz, A., Lucchi, A., and Paras-candolo, G. Neural symbolic regression that scales. In Meila, M. and Zhang, T. (eds.), Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pp. 936-945. PMLR, 18-24 Jul 2021. +Chen, Z., Chen, S., Ning, Y., Zhang, Q., Wang, B., Yu, B., Li, Y., Liao, Z., Wei, C., Lu, Z., et al. Scienceagentbench: Toward rigorous assessment of language + +agents for data-driven scientific discovery. arXiv preprint arXiv:2410.05080, 2024. +Cranmer, M. Interpretable machine learning for science with pysr and symbolicregression. jl. arXiv preprint arXiv:2305.01582, 2023. +Du, M., Chen, Y., Wang, Z., Nie, L., and Zhang, D. Large language models for automatic equation discovery of nonlinear dynamics. Physics of Fluids, 36(9), 2024. +Grayeli, A., Sehgal, A., Costilla-Reyes, O., Cranmer, M., and Chaudhuri, S. Symbolic regression with a learned concept library. arXiv preprint arXiv:2409.09359, 2024. +Ji, H., Wang, Q., Downey, D., and Hope, T. Scimon: Scientific inspiration machines optimized for novelty. In ACL Anthology: Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 279-299. University of Illinois Urbana-Champaign/CABBI, 2024. +Kamienny, P.-A., d'Ascoli, S., Lample, G., and Charton, F. End-to-end symbolic regression with transformers. 
In Advances in Neural Information Processing Systems, 2022. +La Cava, W., Danai, K., and Spector, L. Inference of compact nonlinear dynamic models by epigenetic local search. Engineering Applications of Artificial Intelligence, 55:292-306, 2016. ISSN 0952-1976. doi: https://doi.org/10.1016/j.engappai.2016.07.004. URL https://www.sciencedirect.com/science/article/pii/S0952197616301294. +La Cava, W., Orzechowski, P., Burlacu, B., de Franca, F., Virgolin, M., Jin, Y., Kommenda, M., and Moore, J. Contemporary symbolic regression methods and their relative performance. In Vanschoren, J. and Yeung, S. (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021. +Landajuela, M., Lee, C., Yang, J., Glatt, R., Santiago, C. P., Aravena, I., Mundhenk, T. N., Mulcahy, G., and Petersen, B. K. A unified framework for deep symbolic regression. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K. (eds.), Advances in Neural Information Processing Systems, 2022. +Langley, P. Data-driven discovery of physical laws. Cognitive Science, 5(1):31-54, 1981. +Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J., and Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292, 2024. + +Ma, P., Wang, T.-H., Guo, M., Sun, Z., Tenenbaum, J. B., Rus, D., Gan, C., and Matusik, W. LLM and simulation as bilevel optimizers: A new paradigm to advance physical scientific discovery. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=hz8cFsdz7P. +Majumder, B. P., Surana, H., Agarwal, D., Hazra, S., Sabharwal, A., and Clark, P. Data-driven discovery with large generative models. arXiv preprint arXiv:2402.13610, 2024a. +Majumder, B. P., Surana, H., Agarwal, D., Mishra, B. D., Meena, A., Prakhar, A., Vora, T., Khot, T., Sabharwal, A., and Clark, P. Discoverybench: Towards data-driven discovery with large language models. 
arXiv preprint arXiv:2407.01725, 2024b. +Matsubara, Y., Chiba, N., Igarashi, R., Tatsunori, T., and Ushiku, Y. Rethinking symbolic regression datasets and benchmarks for scientific discovery. arXiv preprint arXiv:2206.10540, 2022. +Meidani, K., Shojaee, P., Reddy, C. K., and Farimani, A. B. Snip: Bridging mathematical symbolic and numeric realms with unified pre-training. In The Twelfth International Conference on Learning Representations, 2023. +Merler, M., Haitsiukevich, K., Dainese, N., and Marttinen, P. In-context symbolic regression: Leveraging large language models for function discovery. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop), pp. 589-606, 2024. +Mirzadeh, I., Alizadeh, K., Shahrokhi, H., Tuzel, O., Bengio, S., and Farajtabar, M. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024. +Mundhenk, T. N., Landajuela, M., Glatt, R., Santiago, C. P., Faissol, D., and Petersen, B. K. Symbolic regression via deep reinforcement learning enhanced genetic programming seeding. In Beygelzimer, A., Dauphin, Y., Liang, P., and Vaughan, J. W. (eds.), Advances in Neural Information Processing Systems, 2021. +Petersen, B. K., Larma, M. L., Mundhenk, T. N., Santiago, C. P., Kim, S. K., and Kim, J. T. Deep symbolic regression: Recovering mathematical expressions from data via risk-seeking policy gradients. In International Conference on Learning Representations, 2021. +Reddy, C. K. and Shojaee, P. Towards scientific discovery with generative ai: Progress, opportunities, and challenges. arXiv preprint arXiv:2412.11427, 2024. + +Schmidt, M. and Lipson, H. Distilling free-form natural laws from experimental data. Science, 324 (5923):81-85, 2009. ISSN 0036-8075. doi: 10.1126/science.1165893. +Shojaee, P., Meidani, K., Barati Farimani, A., and Reddy, C. Transformer-based planning for symbolic regression. 
Advances in Neural Information Processing Systems, 36, 2024a. +Shojaee, P., Meidani, K., Gupta, S., Farimani, A. B., and Reddy, C. K. Llm-sr: Scientific equation discovery via programming with large language models. arXiv preprint arXiv:2404.18400, 2024b. +Strogatz, S. H. Nonlinear dynamics and chaos with student solutions manual: With applications to physics, biology, chemistry, and engineering. CRC press, 2018. +Sun, F., Liu, Y., Wang, J.-X., and Sun, H. Symbolic physics learner: Discovering governing equations via monte carlo tree search. In The Eleventh International Conference on Learning Representations, 2023. +Udrescu, S.-M. and Tegmark, M. Ai feynman: A physics-inspired method for symbolic regression. Science Advances, 6(16):eaay2631, 2020. doi: 10.1126/sciadv.aay2631. +Uy, N. Q., Hoai, N. X., O'Neill, M., McKay, R. I., and Galván-López, E. Semantically-based crossover in genetic programming: application to real-valued symbolic regression. Genetic Programming and Evolvable Machines, 12:91-119, 2011. +Virgolin, M. and Pissis, S. P. Symbolic regression is NP-hard. Transactions on Machine Learning Research, 2022. ISSN 2835-8856. +Wu, Z., Qiu, L., Ross, A., Akyurek, E., Chen, B., Wang, B., Kim, N., Andreas, J., and Kim, Y. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. arXiv preprint arXiv:2307.02477, 2023. +Xie, C., Huang, Y., Zhang, C., Yu, D., Chen, X., Lin, B. Y., Li, B., Ghazi, B., and Kumar, R. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024. +Zhang, Y., Zheng, K., Liu, F., Zhang, Q., and Wang, Z. Autoturb: Using large language models for automatic algebraic model discovery of turbulence closure. arXiv preprint arXiv:2410.10657, 2024. + +# Appendix + +# A. Dataset Details + +# A.1. 
LSR-Transform + +The LSR-Transform is the first category of datasets in LLM-SRBench, designed to evaluate the ability of LLM-based scientific equation discovery methods in less common mathematical forms. This dataset challenges LLM-based discovery methods to avoid reliance on memorization of well-known representations and instead reason through unfamiliar instantiations of familiar problems. This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024). By transforming existing benchmark problems into alternative mathematical representations, LSR-Transform provides a rigorous testbed to evaluate how well LLM-based discovery methods perform in both (1) semantic scientific reasoning, which draws on LLMs' built-in scientific knowledge, and (2) data-driven reasoning, which utilizes experimental feedback for equation discovery. LSR-Transform builds on the Feynman benchmark (Udrescu & Tegmark, 2020), a widely used standard benchmark in scientific equation discovery and symbolic regression. The Feynman benchmark consists of 100 physics equations from Feynman Lecture Series $^{1}$ , representing fundamental laws in physics. While the Feynman benchmark has been instrumental in evaluating symbolic regression methods, it primarily tests the ability to recover equations in their standard, well-known forms which are mostly memorized by LLMs. However, real-world scientific equation discovery often involves reasoning about unknown equations based on domain expertise and knowledge from literature as well as empirical data observations. To address this gap, LSR-Transform transforms the original Feynman equations into less common alternative mathematical forms of the same physical problem by switching input-output variables and symbolically solving for the new target variables. 
+ +![](images/8ba728d78f2919727c08b3690d02bb7aec14be00772e7730233d78780cba6800.jpg) +Figure 7. Examples of how LLM-SRBench (LSR-Transform) problems can be obtained from original Feynman benchmark problems. + +Figure 7 demonstrates the equation transformation process, showing examples of the original Feynman problems (along with their scientific descriptions) and their potential transformed versions. These examples show the dataset's design for altering the mathematical representation of the same problem by analytically solving the equations with respect to different input variables. For instance, the original harmonic oscillator energy equation $E = \frac{1}{4} m(\omega^2 + \omega_0^2)x^2$ is transformed into symbolic representation of $m = \frac{4E}{(\omega^2 + \omega_0^2)x^2}$ and $\omega = \sqrt{\frac{4E}{mx^2} - \omega_0^2}$ where the target variable is switched from energy $(E)$ + +![](images/053f4e582db53b49c4f4556f3f98b2f4911c4a84b86aec09383eaac4c74971e9.jpg) +Figure 8. Comparison of expression complexity distributions between Feynman Benchmark and LLM-SRBench (LSR-Transform) datasets. + +to mass $(m)$ or angular frequency $(\omega)$ . Similarly, in the electric potential equation $V_{e} = \frac{1}{4\pi\epsilon}\frac{p_{d}\cos(\theta)}{r^{2}}$ is also transformed into $p_d = \frac{4\pi\epsilon r^2V_e}{\cos(\theta)}$ , and $r = \sqrt{\frac{p_d\cos(\theta)}{4\pi\epsilon V_e}}$ , showcasing how the problem is reformulated to solve for dipole moment $(p_d)$ , and distance $(r)$ . These transformations introduce less-common mathematical representations that are simple but not trivial for LLMs to find from the problem description and data. By systematically altering the input-output relationships into new analytically solvable symbolic forms, LSR-Transform challenges models to reason through unfamiliar mathematical forms, testing their ability to generalize beyond memorized representations and leverage data-driven reasoning to find new forms. 
+ +The transformed expressions generally exhibit higher complexity than the original physical laws in the Feynman benchmark. To maintain our focus on evaluating semantic complexity (reasoning and memorization capabilities) rather than syntactic complexity and lengthy hypotheses, we deliberately filtered out LSR-transform expressions with significantly higher complexities from the dataset. This filtering ensures that the benchmark primarily challenges discovery models' ability to understand and conduct both scientific and data-driven reasoning rather than their capacity to model longer and more complex mathematical expressions. Figure 8 demonstrates the complexity distribution between the original Feynman Benchmark problems versus their transformed counterparts in LSR-Transform. Following (La Cava et al., 2021), the complexity of each hypothesis (i.e., expression) is quantified as the number of nodes in the expression tree representation of the equation. The expression tree is constructed by parsing the equation into its constituent unary and binary operators, variables, and constants. + +Finally, we also exclude the transformed problems that LLM (Llama-3.1-8B-Instruct) can solve through direct sampling without requiring access to data. This process creates a dataset of 111 transformed equations, each sharing the same scientific context and variables as its original counterpart but presenting a less common mathematical form. The goal of LSR-Transform is not to discover new equations but to evaluate whether LLM-based systems can guide discoveries from non-trivial, data-driven transformations of known equations. + +Details of Filtering Process This section provides a comprehensive breakdown of the filtering steps applied during the LSR-Transform dataset generation, addressing the apparent reduction from 100 original Feynman problems to 111 transformed equations. 
The LSR-Transform dataset generation involves multiple filtering stages that significantly reduce the number of candidate problems. Starting from 100 original Feynman problems, the transformation process initially generates 471 candidate equations by selecting different pivot variables for each equation and performing feature-target transformations. This expansion reflects an average of approximately 4.7 transformed candidates per original problem, demonstrating the diversity introduced by considering multiple input variables as potential targets. The first major filtering occurs during the solvability check using SymPy's symbolic solver (Step 5 in Figure 3), which eliminates 53 problems (11.3% of candidates) that cannot be analytically solved for the target variable. These typically include transcendental equations without closed-form solutions, high-degree polynomial equations where symbolic solutions become intractable, and equations involving complex multi-valued functions. After this stage, 418 problems remain. Notably, no equations are + +eliminated during dataset refinement (Step 6 in Figure 3). This stage focuses solely on filtering individual datapoints to ensure they fall within the valid domains of the transformed equations (e.g., ensuring positive values under square roots, avoiding division by zero), while the equations themselves remain intact. The most significant reduction occurs during complexity filtering, where 307 problems (73.4% of remaining candidates) are eliminated, resulting in the final 111 problems. This filtering serves a crucial purpose: to ensure that the challenging nature of LSR-Transform stems from semantic complexity (reasoning about the scientific problem and unfamiliar mathematical forms) rather than syntactic complexity (handling lengthy expressions). Following La Cava et al. (?), complexity is measured as the number of nodes in the expression tree representation of each equation. 
Following this definition, we constrain the complexity distribution to match that of the original Feynman benchmark (Figure 8). In other words, transformed equations with complexity significantly exceeding the original Feynman distribution are exclude. These design choices maintain focus on testing reasoning capabilities while preserving analytical tractability and scientific diversity across physics domains. As demonstrated in Figure 8, even after filtering, LSR-Transform problems remain substantially more challenging than original Feynman problems at same levels of complexity. + +# A.2. LSR-Synth + +The LSR-Synth is the second category of datasets in LLM-SRBench which is a collection of synthetic problems designed to benchmark the performance of LLMs in scientific equation discovery. This dataset is particularly focused on generating plausible yet challenging equation discovery problems that span multiple scientific domains, including chemistry, physics, biology, and material science. The problems in LSR-Synth are constructed by combining known terms, which are well-established in the scientific literature, with synthetic terms that introduce novel and plausible variations to the equations. + +Figure 9 provides examples of problems from the LSR-Synth. These examples demonstrate the dataset's design, which combines well-established mathematical and scientific expressions with novel, domain-specific variations to create challenging models that address the trivial LLM memorization. Each equation is composed of both known and synthetic terms (highlighted in red). Known terms are terms that are commonly found in scientific equations and are well-documented in the literature for that domain and specific problem. For example, terms like $-C_0A(t)$ and $-C_0A(t)^2$ are typical in chemistry reactions as the first-order and second-order kinetics. 
These terms are included to ensure that the problems remain grounded in the established scientific context, providing a foundation for the LLM-based methods to build upon for equation discovery related to each scientific problem. On the other hand, synthetic terms are introduced to create novel variations in the problems to avoid trivial LLM memorization. For instance, terms like $\sin (\sqrt{A(t)})$ and $\cos (\log (A(t) + 1))$ in chemistry reaction kinetics are designed to challenge the LLM-based discovery models by introducing non-linearities and interactions that are not commonly seen in standard models. These terms are critical for testing the ability of LLM-based equation discovery models to generalize beyond memorization of standard known formulations and discover new patterns from data-driven reasoning and refinement. The combination of known and synthetic terms in LSR-Synth creates a dataset that is both challenging and representative of established scientific problems. This approach enables rigorous evaluation of models' capabilities in interpreting and discovering complex scientific equations, striking a balance between domain familiarity and innovative data-driven reasoning. To generate these known and synthetic terms across various domains, we leverage LLM (GPT-4o) by providing problem domain context and descriptions, prompting it to generate candidate terms. These suggested terms and equations are then filtered based on solvability and novelty criteria, followed by domain expert validation. + +Figure 10 provides an analysis of the complexity of the problems in the LSR-Synth dataset. Similar to Figure 8, complexity is quantified as the number of nodes in the expression tree. This figure highlights the diverse nature of the LSR-Synth dataset, with complexity levels ranging from simple expressions to highly complex ones. 
By spanning a wide range of domains (chemistry, physics, biology, and material science) and hypothesis complexities, LSR-Synth serves as a comprehensive dataset for evaluating the capabilities of LLMs in scientific equation discovery. + +Once the structure of equations is generated, their parameters (coefficients) are sampled randomly from specified and scientifically valid ranges, and then data are generated through different solution methods depending on the domain. For dynamical systems (chemical reactions, population dynamics, and physical oscillators), we employ numerical integration using SciPy's solve_ivp with the RK45 method, while static relationships (material stress-strain) are evaluated directly over predetermined input ranges. For each domain, we generate 5000 evenly spaced samples. In dynamical systems, these samples span the time interval $t \in [0,60]$ , while for material stress-strain relationships, the samples cover strain $\epsilon \in [0,0.6]$ and temperature $T \in [273,573]K$ . To evaluate out-of-distribution (OOD) generalization, for time-dependent systems, we designate the last 500 time points as the out-of-domain (OD) test set, with the remaining 4500 points used for in-domain (ID) training and validation. Similarly, for the stress-strain domain, the OOD test set comprises the last 500 points based + +# Example of LSR-Synth Problems with Known and Synthetic Terms + +$$ +\begin{array}{l} - C _ {0}. A (t) - C _ {1}. \sin \left(\sqrt {A (t)}\right) \\ - C _ {0}. A (t) ^ {2} - C _ {1}. \exp (- C _ {2}. t) + C _ {3}. \sin \left(C _ {4}. A (t)\right) \\ - C _ {0}. A (t) ^ {2} - C _ {1}. \sqrt {A (t)} - C _ {2}. \cos (\log (A (t) + 1)) \\ \end{array} +$$ + +Chemistry: + +Reaction rate with respect to + +Time and Concentration + +Known Terms + +Synthetic Terms + +$$ +r \left(1 - \frac {P (t)}{K _ {0}}\right). P (t) + r. \frac {P (t) ^ {2}}{\alpha P (t) + 1} +$$ + +$$ +r. P (t) + r \left(1 - \frac {P (t)}{K _ {0}}\right). 
P (t) + \beta P (t) \sin (\omega t) +$$ + +$$ +r \left(1 - \frac {P (t)}{K _ {0}}\right). P (t) + r \left(1 - \frac {P (t)}{K _ {0}}\right). \left(- 1 + \frac {P (t)}{\alpha}\right). P (t) + r. (1 - \exp (- \gamma P (t)). P (t) +$$ + +$$ +\left. C _ {0} \cdot \sin (t) - C _ {1} \cdot x (t) - C _ {2} \cdot x. \exp (- | x (t) |) \right. +$$ + +$$ +C _ {0}. \sin (t) - C _ {1}. x (t) ^ {3} - C _ {2}. \sin (x (t)). v (t) - C _ {3}. \sin (v (t)) +$$ + +$$ +C _ {0}. \sin (t) - C _ {1}. v (t) - C _ {2}. \sin (x (t)). v (t) + C _ {3}. x (t) ^ {2}. v (t) - C _ {4}. x (t). \exp (- | x (t) |) +$$ + +$$ +C _ {0} \cdot \left(1 - C _ {1} \cdot (T - T _ {0})\right). \epsilon + C _ {2} \cdot \exp \left(- (T - T _ {0}) ^ {2}\right). \epsilon +$$ + +$$ +C _ {0} \cdot \left(1 - C _ {1} \cdot \left(T - T _ {0}\right)\right). \epsilon - C _ {2} \left(T - T _ {0}\right) + C _ {3} \cdot \left(T - T _ {0}\right). \log (\epsilon + 1) +$$ + +$$ +C _ {0}. \big (1 - C _ {1}. (T - T _ {0}) \big). \epsilon + C _ {2}. \epsilon^ {C _ {3}}. \exp (- \frac {C _ {4}}{C _ {5} . T}) + C _ {6}. \exp (- (T - T _ {0}) ^ {2}). \epsilon +$$ + +Physics: + +Acceleration with respect to + +Time, Displacement, and Velocity + +Figure 9. Examples of LLM-SRBench (LSR-Synth) problems with known and synthetic terms across different domains. Each problem presents a target equation as the hypothesis to be discovered which is composed of known terms and synthetic terms (in blue). + +on temperature values, maintaining a consistent evaluation framework across all domains. The data generation process incorporates the same quality control criteria used in equation generation. Generated solutions must satisfy: (1) solvability within specified numerical tolerance, (2) meaningful physical behavior (avoiding divergence or constant solutions), and (3) uniqueness compared to existing solutions (using RMSE thresholds). 
These criteria ensure that the final dataset contains diverse, physically meaningful, and numerically stable solutions suitable for benchmarking equation discovery methods. + +# B. Evaluation Details + +# B.1. Data Fidelity + +We evaluate the data-driven performance of discovered equations through multiple complementary metrics focusing on both predictive accuracy and generalization capability. The primary metrics include Accuracy to Tolerance $(\mathrm{Acc}_{\tau})$ , and Normalized Mean Squared Error (NMSE). The $\mathrm{Acc}_{\tau}$ metric provides a binary assessment of prediction accuracy based on point-wise relative error. An equation is considered accurate if the maximum relative error across all test tolerance $\tau$ . Formally: + +$$ +\operatorname {A c c} _ {\tau} = \mathbb {1} \left(\max _ {1 \leq i \leq N _ {\text {t e s t}}} \left| \frac {\hat {y} _ {i} - y _ {i}}{y _ {i}} \right| \leq \tau\right) +$$ + +where $\hat{y}_i$ represents the predicted value, $y_i$ is the true value, and $N_{\mathrm{test}}$ is the number of test samples. The indicator function $\mathbb{1}(\cdot)$ returns 1 if the condition is satisfied and 0 otherwise. This metric is particularly useful for cases where maintaining a consistent level of accuracy across all predictions is crucial, as it identifies equations that might have occasional but significant deviations from the true values. NMSE also provides a continuous measure of the overall prediction quality, normalized by the scale of the true values: + +![](images/1ef59f871e9e9d07a925f7b0228547eee64a77dbbb598caf0d6216087b8b2bb6.jpg) +Figure 10. Distribution of problem complexity in LLM-SRBench (LSR-Synth) datasets across scientific domains. 
\mathrm{NMSE} = \frac{\sum_{i=1}^{N_{\mathrm{test}}} (\hat{y}_{i} - y_{i})^{2}}{\sum_{i=1}^{N_{\mathrm{test}}} (y_{i} - \bar{y})^{2}}
The evaluation process begins by pre-processing the hypotheses by (1) removing additional information (such as natural language comments in the case of programs), and (2) replacing constants with placeholder parameter vectors, focusing solely on logical structure and mathematical relations. To assess the reliability of this LLM-based symbolic evaluation approach for equation discovery, we conducted a human evaluation study. Two of the authors independently assessed mathematical symbolic equivalence on a set of 130 randomly sampled problems. The validation study revealed a $94.6\%$ agreement rate between GPT-4o and human evaluators, where agreement rate is calculated as the percentage of cases where both LLM and human evaluators made the same judgment about the mathematical equivalence between predicted and ground truth equations (123 out of 130). + +Figure 11 provides the prompt used for our GPT-4o based evaluation of the mathematical symbolic equivalence between the generated hypothesis (in the form of program or expression) against the ground truth equation. In this setting, the GPT-4o first articulates its mathematical reasoning before making an equivalence binary assessment. + +![](images/0c00954c33b422695f0cf22b16af7ff62ba75f2ae8dbc0e8c786e1780fa18f1b.jpg) +Figure 11. Symbolic assessment in equation discovery with GPT-4o as evaluator + +# C. Implementation Details + +For a comprehensive evaluation, we implement four state-of-the-art LLM-guided scientific equation discovery baselines, each tested on LLM-SRBench datasets with three different LLM backbones: an open-source model (Llama-3.1-8B-Instruct) and two closed-source models (GPT-3.5-turbo and GPT-4o-mini). + +# C.1. Parameters + +Table 2 presents the key implementation details for each discovery agentic method. We adopt most of the hyperparameters from the original implementation for these methods. 
We have only changed some hyperparameters in different baselines that affect the number of LLM calls in the search framework. This is to make sure we have a fair comparison across baseline discovery frameworks with same access budget to LLMs. In our experiments, all baseline frameworks have 1k calls to LLMs (per problem) through the discovery process. + +# C.2. Prompts + +# C.2.1. LLM-SR + +We use the default prompts from LLM-SR's (Shojae et al., 2024b) public code repo (https://github.com/deep-symbolic-mathematics/LLM-SR), which includes: + +# 1. Instruction prompt. + +You are a helpful assistant tasked with discovering mathematical function structures for scientific systems. Complete the 'equation' function below, considering the physical meaning and relationships of inputs. + +# 2. Evaluation specification prompt. + +```python +import numpy as np +#Initialize parameters +MAX_NPARAMS $= 10$ +params $= [1.0]*\mathrm{MAX\_NPARAM}$ +def evaluate(data:data) -> float: ""Evaluate the equation on data observations.""" +``` + +Table 2. Implementation details of LLM-based scientific equation discovery methods. + +
MethodParameters
Direct Prompting (DataBlind)Temperature τ = 0.85 equation program hypotheses sampled from LLM for initial promptNo access to data for data-driven refinementTime limit T = 30s per program hypothesis execution,BFGS optimizer from Scipy for parameter optimization of equation skeletons
SGA (Ma et al., 2024)PyTorch-based implementation of model and torch nn. Module classMean square error loss for data-driven feedback in agentic searchAdam optimizer in PyTorch for differential parameter optimization of equation skeletons
LaSR (Grayeli et al., 2024)Iterations = 25Cycles per iteration = 550Populations = 10Population size = 33Maximum size = 30Operators: +, *, -, /, ∧, exp, log, sqrt, sin, cos, tan, coshLLM weights: llm_mutate =0.005, llm_crossover =0.005, llm_gen_random =0.005Top-K = 20 concepts from libraryDefault configuration of PySR for parameter optimization
LLM-SR (Shojaee et al., 2024b)Temperature τ = 0.8Batch size b = 4 equation programs per prompte = 4 parallel evaluatorsTime limit T = 30s per program hypothesis,Memory limit M = 2GBm = 10 islands for population diversity through searchk = 2 in-context examples per promptMaximum 10 parameters per equation skeletonBFGS optimizer from Scipy for parameter optimization of equation skeletons
+ +```python +# Load data observations +inputs, outputs = data['inputs'], data['outputs'] +X = inputs +# Optimize parameters based on data +from scipy.optimize import minimize +def loss.params): + y_pred = equation(*X, params) + return np.mean((y_pred - outputs) ** 2) +loss_partial = lambda params: loss.params) +result = minimize(loss_partial, [1.0]*MAX_NPARAMS, method='BFGS') +# Return evaluation score +optimized.params = result.x +loss = result(fun +if np.isnan(loss) or np.isinf(loss): + return None +else: + return -loss +``` + +# 3. Equation example specification as Python programming function. + +```python +def equation_v0(\(INPUT VAR[0], ..., \)INPUT VAR[N], params): + ''' Mathematical function for {$OUTPUT VAR_DESC} +Args: + $INPUT VAR[0]: A numpy array representing observations of {$INPUT VAR_DESC[0]}. + ... + $INPUT VAR[N]: A numpy array representing observations of {$INPUT VAR_DESC[N]}. +params: Array of numeric constants or parameters to be optimized +Return: A numpy array representing {$OUTPUT VAR_DESC} as the result of applying the mathematical function to the inputs. +'''# Equation example 1 logic as function body +... +def equation_v1(\)INPUT VAR[0], ..., \)INPUT VAR[N], params): + # Equation example 2 +... +## Function to be completed +``` + +```python +def equation( $INPUT VAR[0], ...,$ INPUT VAR[N], params): + ''' Improvement version of equation_v0 and equation_v1''' +``` + +# C.2.2. LASR + +We use the default prompts from LaSR's (Grayeli et al., 2024) public code repository (https://github.com/trishullah/LibraryAugmentedSymbolicRegression.jl), which includes: + +1. The LLMINIT prompt, which is used in an LLM-augmented initialization operation. +2. LLMMUTATION prompt is used to mutate an expression based on a set of concepts. +3. LLMCROSSOVER prompt is used to construct a new expression from the crossover of two sampled expressions based on a set of concepts. +4. 
LLM Concept Abstraction prompt in CONCEPTABSTRACTION function, which extracts a natural language concept from current trends of hypotheses at each iteration. +5. LLM Concept Evolution prompt in CONCEPTEVOLUTION function, which creates a new concept that follows a set of ideas in the current library. + +In the following, we provide examples of these prompts. + +# 1. LLMINIT prompt. + +```handlebars + +You are a helpful assistant that proposes a mathematical expression by following three provided suggestions. An expression must consist of the following variables: {{variables}}. All constants will be represented with the symbol C. Each expression will only use these operators: {{operators}}. + +Suggestion 1: {{assume1}} +Suggestion 2: {{assume2}} +Suggestion 3: {{assume3}} +Propose {{N}} expressions that would be appropriate given the suggestions. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: +``'json +["expr1", "expr2", ... "expr{N}"] +] +``` + +# 2. LLMMUTATION prompt. + +```txt + +You are a helpful assistant that mutates a mathematical expression by following a few provided suggestions. You will be given three suggestions and a single reference expression to mutate. +An expression must consist of the following variables: $\{\{variables\}\}$ . All constants will be represented with the symbol C. Each expression will only use these operators: $\{\{\mathrm{operators}\}\}$ + +Suggestion 1:{\{assume1\}} +Suggestion 2:{\{assume2\}} +Suggestion 3:{\{assume3\}} +Reference Expression:{\{expr\}} +Propose $\{\{\mathbf{N}\}\}$ expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: +``'json +["expr1", "expr2", ... "expr.{N}"] +] +``` + +# 3. LLMCROSSOVER prompt. 
+ +```latex + +You are a helpful assistant that recombines two mathematical expressions by following a few provided suggestions. You will be given three suggestions and two reference expressions to recombine. +An expression must consist of the following variables: $\{\{variables\}\}$ . All constants will be represented with the symbol C. Each expression will only use these operators: $\{\{\mathrm{operators}\}\}$ +``` + +```handlebars + +Suggestion 1: {{assume1}} +Suggestion 2: {{assume2}} +Suggestion 3: {{assume3}} +Reference Expression 1: {{expr1}} +Reference Expression 2: {{expr2}} +Propose {{N}} expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: +``'json +["expr1", "expr2", ... "expr.{N}" +] +``` + +# 4. LLM Concept Abstraction prompt. + +```txt + +You are a helpful assistant that hypothesizes about the underlying assumptions that generated a list of good and bad mathematical expressions in detailed ways. My ultimate goal is to discover what assumptions generated the observed good mathematical expressions and excludes the bad mathematical expressions. Focus more on the good expressions, their mathematical structure, and any relation to physical concepts. Note that capital C represents an arbitrary constant +``` + +```txt + +Good Expression 1: {gexpr1} +Good Expression 2: {gexpr2} +Good Expression 3: {gexpr3} +Good Expression 4: {gexpr4} +Good Expression 5: {gexpr5} +Bad Expression 1: {bexpr1} +Bad Expression 2: {bexpr2} +Bad Expression 3: {bexpr3} +Bad Expression 4: {bexpr4} +Bad Expression 5: {bexpr5} +Propose $\{\{N\}\}$ hypotheses that would be appropriate given the expressions. Provide short commentary for each of your decisions. Do not talk about topics related to the simplicity or complexity of the expressions. 
I want ideas that are unique and interesting enough to amaze the world's best mathematicians. End with a JSON list that enumerates the proposed hypotheses following this format: +``'json +["hyp1", "hyp2", ... "hyp.{N}]'' +] +``` + +# 5. LLM Concept Evolution prompt. + +```txt + You are an insightful assistant skilled in logical reasoning and deduction. Your task is to analyze a set of ideas and infer nontrivial conclusions that logically follow from them. The ultimate goal is to uncover underlying principles or properties of the hidden expressions. Focus on providing logical conclusions that are unique, interesting, and profound. +``` + +```txt + +Idea 1:{ideal} +Idea 2:{idea2} +Idea 3:{idea3} +Idea 4:{idea4} +Idea 5:{idea5} +Based on these ideas, deduce $\{\{N\}\}$ logical conclusions or hypotheses that directly follow from them. Provide a brief explanation for each conclusion, highlighting the logical connections between the ideas. Avoid discussing topics related to the simplicity or complexity of the expressions. Conclude with a JSON list that enumerates the proposed conclusions in the following format: +``'json +["Conclusion 1", "Conclusion 2", ... "Conclusion {{N}}]" +] +``` + +# C.2.3. SGA + +The following prompts are used in our implementation of SGA (Ma et al., 2024) for scientific equation discovery tasks, following the original implementation SGA's public code repository (https://github.com/PingchuanMa/SGA), which includes: + +System prompt for task. + +Code formatting prompt for scientific equation discovery task. +```txt +You are an intelligent AI assistant for coding and scientific equation discovery. +You are tasked with discovering mathematical function structures for scientific systems. +Follow the user's requirements carefully and make sure you understand them. +Keep your answers short and to the point. +Do not provide any information that is not requested.. +Always document your code as comments to explain the reason behind them. 
+Use Markdown to format your solution. +You are very familiar with Python and PyTorch. +Do not use any external libraries other than the libraries used in the examples. +``` + +```python +## PyTorch Tips +1. When working with tensors, always use PyTorch's operators (such as 'torch.exp', 'torch.cos', 'torch.sqrt', ...) to ensure compatibility and optimal performance. +2. In PyTorch, operator input arguments must be tensors, not floats. +## Code Requirements +1. The only library allowed is PyTorch. Follow the format provided by the user examples. +2. Annotate the size of the tensor as comment after each tensor operation. For example, # (B, 3, 3). +3. Separate the code into parameters that can be tuned with differentiable optimization and the symbolic expression represented by PyTorch code. Define them respectively in the +5. The proposed code must strictly follow the structure and function signatures below: +``'python +import torch +import torch(nn as nn) +class SymbolicEquation(nn.Module): + def __init__(self, {PARAM_INPUTS}): + Define trainable continuous parameters for differentiable optimization. + Tentatively initialize the parameters with the default values in args. + Args: + {PARAM_DESCRIPTION} + super().__init__() + {PARAM_INIT} + def forward(self, {INPUT_variables}) -> torch.Tensor: + {FORWARD_FUNCTIONDescriptions} +``` + +```txt +1. Analyze step-by-step what the potential problem is in the previous iterations based on the feedback. Think about why the results from previous iterations mismatched with the ground truth. Do not give advice about how to optimize. Focus on the formulation of the scientific equation. Start this section with "#Analysis". Analyze all iterations individually, and start the subsection for each iteration with "#Iteration N", where N stands for the index. Remember to analyze every iteration in the history. +``` + +Context prompt for each scientific problem. +```txt +2. Think step-by-step what you need to do in this iteration. 
Think about what is needed to improve performance. If the analysis suggests specific functional forms or constraints, think about how these will be incorporated into the symbolic equation. Think about how to separate your algorithm into a continuous parameter part and a symbolic expression model part. Describe your plan in pseudo-code, written out in great detail. Remember to update the default values of the trainable parameters based on previous optimizations. Start this section with "# Step-by-Step Plan". +``` + +```txt +3. Output the code in a single code block ''``python ... ''`` with detailed comments in the code block. Do not add any trailing comments before or after the code block. Start this section with "# Code". +``` + +# # # Context + +The objective is to construct a mathematical expression that accurately maps input variables to a target output based on a provided dataset. The task involves filling in a code block to define a symbolic expression or model that minimizes the difference between predicted and ground-truth outputs. The code block defines a class with two functions: one for parameters within the expression and another for generating or modifying the symbolic structure of the expression. Feedback is provided in the form of metrics measuring the error between the model's predictions and the ground-truth values, as well as guidance on structural improvements to the symbolic expression. + +The expression represents $\{$ OUTPUT VAR DESC\}, given data on $\{$ INPUTS DESC\}. + +# D. Additional Results and Analysis + +Detailed Numeric Accuracy Analysis. While Table 1 presents median Normalized Mean Squared Error for each method-LLM combination across LLM-SRBench datasets, Figure 12 provides a more comprehensive view of error distributions across all samples. 
These box plots illustrate performance variations across LLM-SRBench datasets from two perspectives: + +comparing different equation discovery methods with GPT-4o-mini as the LLM backbone, and examining different LLM backbones when using LLM-SR method. The substantial variance in NMSE performance across samples reflects the diverse complexity inherent in our benchmark—stemming from both the varying mathematical transformations in LSR-Transform and the different combinations of known and synthetic terms in LSR-Synth datasets. Notably, the relative difficulty of datasets varies across methods and LLM backbones, suggesting that different methods and LLMs possess distinct capabilities in terms of leveraging domain knowledge, reasoning, and generating novel hypotheses. + +![](images/f5679f3c4d121b4ea5c20faf879d882e4987e7cc96f2c511c1b25316fcd262e4.jpg) +Figure 12. Normalized Mean Squared Error (NMSE) of discovered equations in various domains of LLM-SRBench with respect to (left) different equation discovery methods using GPT-4omini LLM backbone, and (right) different LLM backbones using LLM-SR method + +![](images/7287530c601c8f7a06f0551faaeaa9113407b59a179cd6bf6b36c8bedb772eb5.jpg) + +Symbolic Accuracy and Generalization. For scientific equation discovery methods, both symbolic accuracy and out-of-domain generalization serve as crucial evaluation metrics, reflecting the methods' ability to uncover true governing equations. Figure 13 examines the relationship between these metrics, plotting symbolic accuracy against both OOD accuracy and OOD NMSE across all method-LLM-domain combinations in LSR-Synth. 
The strong correlation observed between symbolic and OOD performance yields two important insights: first, it establishes OOD evaluation as a powerful metric for assessing the discovery of generalizable equations, an approach historically underutilized in symbolic regression; second, it validates our LLM-based symbolic evaluation approach through its strong alignment with numeric generalization performance. + +Qualitative Analysis of Outputs. To provide deeper insights into the behavior of different discovery methods, Figure 14 illustrates their final discovered hypotheses on a biological population growth problem (BPG0) using Llama-3.1-8B as the LLM backbone. Direct Prompting (Figure 14(a)) generates equations that capture basic population dynamics, demonstrating LLMs' ability to propose scientifically plausible structures. SGA's solution (Figure 14(b)) successfully incorporates one of the common population growth terms while exploring additional structural components. LaSR (Figure 14(c)) discovers an equation structure that combines multiple interaction terms, though it differs from established scientific formulations. LLM-SR (Figure 14(d)) combines both standard population dynamics terms and synthetic components in its solution. These examples demonstrate the diverse approaches methods take in balancing scientific interpretability with mathematical expressiveness when discovering equation structures. + +# E. Discussion and Future Directions + +Our findings from LLM-SRBench reveal several key insights that inform the design of future LLMs for scientific discovery applications. Scientific equation discovery remains a challenging problem for LLMs, requiring a complex interplay of domain knowledge, search capabilities with data-driven feedback, and mathematical manipulation skills. 
Our results demonstrate that this problem poses significant challenges for LLM-based discovery frameworks across different model architectures, suggesting that current approaches may be fundamentally limited in their ability to perform genuine scientific discovery. + +This work questions the current evaluation paradigm for equation discovery in emerging LLM-based techniques. We + +![](images/221b1498b3f8e47e6bf90ed1d149435f76b18381710e3d5b0115980fca2e9cd3.jpg) +Figure 13. Symbolic Accuracy versus OOD performance over all domains, methods, and backbone LLM pairs. + +demonstrate that existing benchmarks for this task are susceptible to memorization and inadequate for evaluating these techniques' true scientific discovery capabilities. Motivated by these limitations, we designed LLM-SRBench to address the memorization issue through two key innovations: synthetic imaginary scenarios (LSR-Synth category) that are not based on existing scientific knowledge and require data-driven discovery tools for solution, and transformed equations (LSR-Transform category) that convert common forms of scientifically known equations into less familiar formulations. The LSR-Synth category targets genuine innovation in LLM-based discovery techniques by eliminating the possibility of recalling memorized equations, while LSR-Transform problems are difficult to recite from memory and require reasoning over hypothesis generation steps, making them suitable candidates for evaluating recently emerging LLM-based scientific discovery agents. While the mathematical transformations in LSR-Transform are algebraically valid, their scientific meaningfulness varies considerably across contexts. Many transformations correspond to legitimate physics problems from the Feynman Lecture Series collection and represent alternative problem formulations with practical significance. 
For example, in the Harmonic Oscillator Energy problem, the original formulation $E = \frac{1}{4} m(\omega^2 + \omega_0^2)x^2$ expresses energy as a function of system parameters, while the transformed version $m = \frac{4E}{(\omega^2 + \omega_0^2)x^2}$ determines the mass required for given energy storage. This transformation maintains scientific meaning by addressing the engineering question of what mass is needed to store a specific amount of energy in an oscillating system, and such inversions are common in engineering design problems where system parameters must be determined to achieve desired performance characteristics. Similarly, the Electric Potential problem transforms from $V_e = \frac{1}{4\pi\epsilon}\frac{p_d\cos(\theta)}{r^2}$ (potential at a point due to a dipole) to $r = \sqrt{\frac{p_d\cos(\theta)}{4\pi\epsilon V_e}}$ (distance for a given potential), addressing the practical question of determining measurement distances in electrostatic experiments or sensor design. + +However, not all transformations maintain clear physical interpretability. Some result in equations where the target variable appears in complex functional forms that may not correspond to natural physical questions, such as solving for angular frequency in oscillatory systems yielding expressions involving square roots of differences that lack intuitive physical meaning. Additionally, certain transformations may obscure natural causal relationships—transforming from “force causes acceleration” to “acceleration determines force” maintains mathematical validity but may not reflect underlying physical causality. 
The LSR-Transform category represents a deliberate balance between mathematical rigor and physical meaningfulness by constraining the complexity of transformed problems to match original problems, focusing on semantic rather than syntactic challenges in scientific equation discovery, while maintaining the original scientific context and variable meanings to ensure that underlying physics remains relevant even when mathematical formulation changes. The varying scientific meaningfulness of transformations reflects broader challenges in automated scientific discovery that warrant future investigation. Automated discovery systems must incorporate mechanisms to evaluate not only data-driven correctness but also scientific plausibility and interpretability of generated hypotheses, as mathematical validity alone is insufficient for meaningful scientific contribution. The most effective approach to scientific equation discovery likely involves close collaboration between AI systems, which excel at exploring vast hypothesis spaces, and human domain scientists, who can + +assess scientific meaningfulness and guide discovery directions based on deep contextual understanding. Future equation discovery methods could improve by incorporating literature retrieval tools to build grounding foundations for scientific context and domain knowledge, helping to prioritize discoveries that are mathematically valid, data-consistent, novel, and scientifically meaningful. The field needs evaluation frameworks that assess not just mathematical correctness but also scientific novelty, interpretability, and practical applicability of discovered equations, moving beyond narrow accuracy metrics toward a more comprehensive understanding of what constitutes valuable scientific discovery in the age of LLMs with their vast scientific knowledge. + +# F. 
Comparison with Standard (non-LLM) Symbolic Regression Baselines + +To further validate the utility of LLM-SRBench and demonstrate the advantages of LLM-based approaches, we conducted additional experiments comparing LLM-based methods with traditional symbolic regression techniques that do not incorporate domain knowledge. We evaluated PySR (Cranmer, 2023), a state-of-the-art symbolic regression method based on genetic programming, on all LLM-SRBench datasets. PySR operates purely on numerical data points without access to the scientific context, variable descriptions, or domain knowledge that LLM-based methods can leverage in discovery process. We used PySR's default configuration with the same computational budget (equivalent number of evaluations) as the LLM-based methods to ensure fair comparison. Table 3 presents the performance comparison between the best-performing LLM-based method from Table 1 and PySR across all LLM-SRBench datasets. The results reveal several key insights about the complementary strengths and limitations of non-LLM versus LLM-based approaches in equation discovery. + +PySR demonstrates competitive and sometimes even better numerical accuracy $(\mathrm{Acc}_{0.1})$ across all datasets. However, PySR consistently shows significantly lower symbolic accuracy, particularly struggling with non-physics domains where it achieves $0\%$ symbolic accuracy on chemistry, biology, and material science datasets. The performance gap is most pronounced in problems that require specialized scientific knowledge. While PySR can fit mathematical patterns in the data, it lacks the scientific intuition to discover equations that align with established physical principles or domain-specific terminology. Interestingly, PySR shows relatively better performance on physics problems, achieving modest symbolic accuracy of $4.54\%$ on LSR-Synth Physics and $8.11\%$ on LSR-Transform (which is based on Feynman physics equations). 
This suggests that physics problems may contain mathematical patterns that are more aligned with the dictionary design in PySR. So they can be discovered better through the data-driven search pipeline designed in PySR. These findings strengthen the motivation for LLM-based scientific equation discovery and demonstrate that LLM-SRBench successfully captures challenges in equation discovery that traditional symbolic regression methods cannot adequately address through numerical data-driven optimization alone. + +Table 3. Performance comparison between LLM-based methods and state-of-the-art non-LLM symbolic regression baseline PySR on LLM-SRBench. SA = Symbolic Accuracy (%), Acc0.1 = Accuracy to tolerance 0.1 (%). + +
Dataset (Metric)LLM-SR (best) SA / Acc0.1LaSR (best) SA / Acc0.1SGA (best) SA / Acc0.1PySR SA / Acc0.1
LSR-Transform31.53 / 39.6412.61 / 50.459.91 / 8.118.11 / 56.76
LSR-Synth Chemistry11.11 / 66.662.77 / 38.920 / 16.660 / 41.67
LSR-Synth Biology25.30 / 58.338.33 / 20.834.16 / 12.510 / 25.0
LSR-Synth Physics9.91 / 36.369.91 / 31.814.54 / 9.094.54 / 29.55
LSR-Synth Material Science20.24 / 88.2828.12 / 72.040 / 36.110 / 68.0
+ +Table 4: LSR-Synth mathematical equations for each scientific domain. + +
DomainEquation IDEquation
ChemistryCKR1-kA(t)2+kzA(t)2/(βA(t)4+1)
CKR2-kA(t)2-kA(t)+kw cos(log(A(t)+1))
CKR3-kA(t)+kw cos(log(A(t)+1))
+ +Continued on next page + +Table 4 - continued from previous page + +
DomainEquation IDEquation
CKR4-kA(t)2-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR5-kA(t)2+kqA(t) log(γt+1)
CKR6-k√A(t)+kfA(t)0.33
CKR7-kA(t) exp(-ks t)+km sin(√A(t))
CKR8-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR9-kA(t)2-kA(t)+kt sin(log(A(t)+1))
CKR10-k√A(t)+kw cos(log(A(t)+1))
CKR11-kA(t)2+kt sin(log(A(t)+1))
CKR12-kA(t)2+km sin(√A(t))
CKR13-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR14-kA(t)+kp sin(ωA(t))
CKR15-k√A(t)-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR16-k√A(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR17-kA(t)+kfA(t)0.33
CKR18-kA(t) exp(-ks t)+kfA(t)0.33
CKR19-kA(t)2+kp sin(ωA(t))
CKR20-kA(t)2-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR21-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR22-kA(t) exp(-ks t)+kqA(t) log(γt+1)
CKR23-kA(t)2-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR24-k√A(t)+kp sin(ωA(t))
CKR25-k√A(t)-kA(t)2+kfA(t)0.33
CKR26-kA(t)+kt sin(log(A(t)+1))
CKR27-kA(t)2-kA(t) exp(-ks t)+km sin(√A(t))
CKR28-kA(t)2-kA(t) exp(-ks t)+kfA(t)0.33
CKR29-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR30-kA(t)-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR31-kA(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR32-k√A(t)-kA(t)+kw cos(log(A(t)+1))
CKR33-kA(t)-kA(t) exp(-ks t)+kfA(t)0.33
CKR34-k√A(t)-kA(t)2+kt sin(log(A(t)+1))
CKR35-kA(t)2+kfA(t)0.33
CKR36-kA(t)+kqA(t)log(γt+1)
+ +Continued on next page + +Table 4 - continued from previous page + +
DomainEquation IDEquation
BiologyBPG1r(1-P(t)/K0)P(t)+rP(t)0.33
BPG2rP(t)exp(-γt)+rP(t)2/(αP(t)+1)
BPG3βP(t)sin(ωt)+rP(t)exp(-γt)
BPG4r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG5r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG6r(1-P(t)/K0)P(t)+rP(t)2/(αP(t)+1)
BPG7-QαP(t)+r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG8r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)0.33
BPG9r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG10r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG11rP(t)0.33+rP(t)
BPG12r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)exp(-γt)
BPG13βP(t)sin(ωt)+r(1-P(t)/K0)P(t)
BPG14r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG15r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG16rP(t)0.33+rP(t)exp(-γt)
BPG17r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG18r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33
BPG19βP(t)sin(ωt)+r(1-P(t)/K0)P(t)+rP(t)
BPG20r(1-P(t)/K0)P(t)+rP(t)/tα
BPG21r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG22r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)/tα
BPG23r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG24r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
PhysicsPO1F0sin(t)-βsin(v(t))-ω02x(t)3-ω02x(t)exp(-|x(t)|)
PO2F0sin(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO3-αv(t)3-μ(1-x(t)2)v(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO4F0sin(t)-βsin(v(t))-2βv(t)
PO5F0sin(t)-αv(t)3-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)
PO6-βsin(v(t))-2βv(t)-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)3-ω02x(t)
PO7-βlog(|v(t)|+1)-2βv(t)-ω02x(t)3
PO8-αv(t)3-β|v(t)|0.33-ω02x(t)3
+ +Continued on next page + +Table 4 - continued from previous page + +
DomainEquation IDEquation
PO9-β|v(t)|0.33 - ω02x(t)3
PO10F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)
PO11F0sin(t) - ω02(γt+1)x(t) - ω02x(t)3 - ω02x(t)
PO12-βsin(v(t)) - ω02(γt+1)x(t) - ω02x(t)3
PO13F0sin(t) - αv(t)3 - β|v(t)|0.33 - ω02(γt+1)x(t) - ω02x(t)
PO14F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO15F0sin(t) - βlog(|v(t)| + 1) - βsin(v(t)) - 2βv(t) - μ(1-x(t)2)v(t)
PO16F0sin(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t) - ω02x(t) exp(-|x(t)|)
PO17F0sin(t) - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO18F0sin(t) - βsin(x(t))v(t) - 2βv(t) - ω02x(t)
PO19-βsin(x(t))v(t) - ω02x(t)
PO20-2βv(t) - ω02x(t) exp(-|x(t)|)
PO21-αv(t)3 - β log(|v(t)| + 1) - 2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO22F0sin(t) - βsin(x(t))v(t)
PO23-2βv(t) - β exp(-|x(t)|)v(t) - μ(1-x(t)2)v(t) - ω02x(t)3
PO24F0sin(t) - βlog(|v(t)| + 1) - ω02x(t) exp(-|x(t)|)
PO25F0sin(t) - αv(t)3 - β log(|v(t)| + 1)
PO26F0sin(t) - βsin(v(t))
PO27F0sin(t) - βlog(|v(t)| + 1) - 2βv(t) - ω02x(t)3
PO28F0sin(t) - αv(t)3 - 2βv(t) - βexp(-|v(t)|)v(t)
PO29-2βv(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
PO30-μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)3
PO31-αv(t)3 - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO32-ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3
PO33F0sin(t) - αv(t)3 - βexp(-|v(t)|)v(t) - ω02x(t)3
PO34-2βv(t) - μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)
PO35-2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO36F0sin(t) - βsin(v(t)) - ω02(γ|v(t)|0.33 + 1)x(t)
PO37F0sin(t) - βexp(-|x(t)|)v(t)
PO38F0sin(t) - αv(t)3 - 2βv(t) - ω02(γt+1)x(t)
PO39-βsin(v(t)) - μ(1-x(t)2)v(t) - ω02x(t) exp(-|x(t)|)
PO40F0sin(t) - αv(t)3 - βexp(-|x(t)|)v(t) - μ(1-v(t)2)v(t)
PO41F0sin(t) - β|v(t)|0.33 - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
+ +Continued on next page + +Table 4 - continued from previous page + +
DomainEquation IDEquation
PO42-μ(1-x(t)2)v(t)-ω02x(t)exp(-|x(t)|)
PO43F0sin(t)-αv(t)3-βsin(x(t))v(t)-2βv(t)
PO44F0sin(t)-βsin(x(t))v(t)-2βv(t)-μ(1-x(t)2)v(t)-ω02x(t) exp(-|x(t)|)
MaterialMatSci1E0ε(-αT(T-T0)+1)-β(T-T0)+εMη(T-T0)
MatSci2Hε3+KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci3Hε3+η(T-T0)exp(-ε)
MatSci4Hε3+KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci5E0ε2+η(T-T0)log(ε+1)
MatSci6E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εMη(T-T0)
MatSci7E0ε(-αT(T-T0)+1)+εη(T-T0)2
MatSci8Hε3-β(T-T0)+η(T-T0)log(ε+1)
MatSci9E0ε(-αT(T-T0)+1)+εMη(T-T0)
MatSci10Hε3-β(T-T0)+ε3η(T-T0)
MatSci11Hε3+KεNexp(-Q/(RT))+εη(T-T0)2
MatSci12KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci13E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εηexp(-(T-T0)2)
MatSci14-β(T-T0)+εηexp(-(T-T0)2)
MatSci15-β(T-T0)+εMη(T-T0)
MatSci16E0ε(-αT(T-T0)+1)+εηexp(-(T-T0)2)
MatSci17E0ε2+εη(T-T0)2
MatSci18E0ε(-αT(T-T0)+1)-β(T-T0)+η(T-T0)log(ε+1)
MatSci19Hε3+η(T-T0)sin(ε)
MatSci20E0ε2-β(T-T0)+ε3η(T-T0)
MatSci21E0ε2+εηsin(T-T0)
MatSci22KεNexp(-Q/(RT))-β(T-T0)+η(T-T0)log(ε+1)
MatSci23E0ε(-αT(T-T0)+1)+Hε3+η(T-T0)sin(ε)
MatSci24KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci25E0ε2+E0ε(-αT(T-T0)+1)+η(T-T0)log(ε+1)
+ +Ground Truth: $\frac{dP}{dt} = 0.9540\left(1 - \frac{P}{96.9069}\right)P + 0.9540P^{\frac{1}{3}}$ +a) Predicted: Direct (Llama-3.1-8b) +```python +def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray: + ''' Mathematical function for Population growth rate +Args: + t: A numpy array representing observations of Time. + P: A numpy array representing observations of Population at time t. + params: Array of numeric constants or parameters to be optimized +Return: + A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs. +''' output = ( + params[0] * P # A linear term based on population size + + params[1] * P ** 2 # A squared term for birth or death ratio + + params[2] * t + params[3] * P * t # Linear and linear cross term + + params[4] +) +return output +``` + +b) Predicted: SGA (Llama-3.1-8b) +```python +class SymbolicEquation(nnModule): + def __init__(self, r: float = 0.5, K: float = 100.0, time_scale: float = 0.05): + Define trainable continuous parameters for differentiable optimization. + Tentatively initialize the parameters with the default values in args. + Params: + r (float): growth rate. + K (float): carrying capacity. + time_scale (float): time-dependent coefficient. + super().__init() + self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) # (1,) + self.K = nn_PARAMETER(torch.tensor(K, dtype=torch.float)) # (1,) + self.time_scale = nn_PARAMETER(torch.tensor(time_scale, dtype=torch.float)) + # # (1,) + # self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) +def forward(self, t: torch.Tensor, P: torch.Tensor) -> torch.Tensor: + ""Mathematical function for Population growth rate + Params: + t: Time. + P: Population at time t. + Return: + dP_dt: Population growth rate. + "" + # # (B,) + dP_dt = self.r * torch.exp(-self.time_scale*t) * P * (1 - P / self.K) # (B,) +return dP_dt +``` + +c) Predicted: LaSR (Llama-3.1-8b) + +$$ +\frac {d P}{d t} = \left(- 9 1 0. 
3 1 - \left(\left(P \cdot (P + \sqrt {P})\right) + \log \left((t + 4. 6 3 9 6) - (- 0. 6 6 0 9 5)\right)\right) \cdot \sin \left(\frac {- 0 . 9 5 5 7 2}{e ^ {t} - t}\right)\right) + 9 1 0. 3 +$$ + +d) Predicted: LLM-SR (Llama-3.1-8b) + +def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray: + +Mathematical function for Population growth rate with a logistic growth model, decay, and periodicity. + +Args: + +t: A numpy array representing observations of Time. + +P: A numpy array representing observations of Population at time t. + +params: Array of numeric constants or parameters to be optimized (k, M, r, a, b, A, w, tau) + +Returns: + +A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs. + +1 1 + +Apply transformations to inputs (if needed) + +$t = t / np.\max(t)$ #Normalize time + +P = P / np.max(P) #Normalize population + +Compute the rates using logistic growth model + +$\mathrm{dP\_dt} = \mathrm{params}[0] * \mathrm{P} * (1 - \mathrm{P}) - \mathrm{params}[1] * \mathrm{P} - \mathrm{params}[2] * \mathrm{P}**\mathrm{params}[3]$ + +Compute the periodicity factor + +periodic_factor = params[4] * np.sin(param[5] * (t - params[6]) + params[7]) + +Combine growth rate and periodicity + +$\mathrm{dP\_dt\_with\_periodicity} = \mathrm{dP\_dt} * \mathrm{periodic\_factor}$ + +return dP_dt_with_periodicity + +Figure 14. Example of output hypotheses from different LLM scientific equation discovery methods for BPG0 problem in LSR-Synth biology domain. + +Ground Truth: + +$$ +0. 1 9 \cdot A (t) + 0. 1 9 \cdot \cos (\log (A (t) + 1)) +$$ + +# a) Predicted: Direct (Llama-3.1-8b) + +```python +def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray: + "" Mathematical function for Rate of change of concentration (Growth, Reaction or Decay) + Args: + t: A numpy array representing observations of Time. + A: A numpy array representing observations of Concentration at time t. 
+ params: Array of numeric constants or parameters to be optimized. + - params[0]: k (Rate constant, e.g., 0.1 min^-1) + - params[1]: C0 (initial concentration, e.g., 10) for decay or growth + models + Return: + A numpy array representing Rate of change of concentration as the result of applying the mathematical function to the inputs. + - equation implemented under assumption that k*A represents linear decay, or linear growth. + output = params[0] * A + params[1] * params[2] + return output +``` + +# b) Predicted: SGA (Llama-3.1-8b) + +```txt +class SymbolicEquation(nnModule): def init(self,k:float $= 9.00\mathrm{e} - 02$ A_0:float $= 9.60\mathrm{e} - 01$ A_t:float $= 9.60\mathrm{e} - 01$ .. Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. Args: k(float):time-dependent reaction rate constant. A_0(float):initial concentration. A_t(float):time-dependent change in concentration. super().__init_() #(1,) self.k $\equiv$ nn.Parameters(torch.tensor(k,dtype $\coloneqq$ torch.float)) self.A_0 $\equiv$ nn.Parameterrtorch.tensor(A_0,dtype $\coloneqq$ torch.float) # (1,) self.A_t $\equiv$ nn.Parameterrtorch.tensor(A_t,dtype $\coloneqq$ torch.float) # (1,) def forward(self,t:torch.Tensor,A:torch.Tensor) -> torch.Tensor: ""Mathematical function for Rate of change of concentration in chemistry reaction kinetics Args: t:Time.#(B,1) A:Concentration at time t.#(B,1) Return: dA_dt:Rate of change of concentration in chemistry reaction kinetics. (# (B,1) +``` + +c) Predicted: LaSR (Llama-3.1-8b) + +$$ +\left(- 0. 0 0 4 0 4 0 2 / \left(1. 3 2 2 2 ^ {t}\right)\right) * \left(\left(A ^ {t}\right) ^ {\cos \left(\left(\left(t + \left(\left(A / (A / A)\right) - 0. 0 4 3 4 7 7\right)\right) ^ {A}\right) - A\right) + \left(\left(0. 7 5 6 2 9 ^ {1. 2 0 2 8} t\right)\right)\right)} \left. \right. 
+$$ + +d) Predicted: LLM-SR (Llama-3.1-8b) + +```python +def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray: + "" Mathematical function for Rate of change of concentration in chemistry reaction kinetics + Args: + t: A numpy array representing observations of Time. + A: A numpy array representing observations of Concentration at time t. + params: Array of numeric constants or parameters to be optimized + Return: + A numpy array representing Rate of change of concentration in chemistry reaction kinetics as the result of applying the mathematical function to the inputs. + output = params[0] * A + params[1] * A**2 + params[2] * A**3 + params[3] * t + params[4] + return output +``` + +Figure 15. Example of output hypotheses from different LLM scientific equation discovery methods for CKR2 problem in LSR-Synth chemistry domain. + +# Ground Truth: + +$$ +3 1. 1 2 \cdot \varepsilon \cdot (- 0. 0 5 \cdot (T - 3 1 6. 1 9) + 1) - 0. 2 3 \cdot (T - 3 1 6. 1 9) + \varepsilon^ {0. 6 0} \cdot 3. 6 3 \cdot (T - 3 1 6. 1 9) +$$ + +a) Predicted: Direct (Llama-3.1-8b) +```python +def equation(eppsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray: + "" Mathematical function for Stress +Args: + epsilon: A numpy array representing observations of Strain. + T: A numpy array representing observations of Temperature. + params: Array of numeric constants or parameters to be optimized +Return: + A numpy array representing Stress as the result of applying the mathematical function to the inputs. 
+ "" + reference_temperature = 0 + sigma_0 = params[0] + alpha = params[1] + n = params[2] + linear_epsilon_term = params[3] * epsilon + multiplicative_epsilon_term = params[4] * epsilon + linear_temperature_term = params[5] * T + output = sigma_0 * (1 + alpha * (T - reference_temperature)) * epsilon** + linear_epsilon_term + multiplicative_epsilon_term + linear_temperature_term + return output +``` + +b) Predicted: SGA (Llama-3.1-8b) +```python +class SymbolicEquation(nnModule): def__init__(self,c_0:float $= 9.83\mathrm{e} - 01$ ,c_1:float $= 9.86\mathrm{e} - 01$ ,c_2:float $= 9.83\mathrm{e} - 01$ a:float $= -3.73\mathrm{e} - 03$ ,b:float $= -1.11\mathrm{e} - 02$ ,d:float $= 2.63\mathrm{e} - 02$ : Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. +Args: c_0(float): coefficient. c_1(float): coefficient. c_2(float): coefficient. a(float): coefficient for quadratic term. b(float): coefficient for exponential term. d(float): coefficient for non-linear relationship between Strain and Temperature. super()._init_(self.c_0=nn.Parameters(torch.tensor(c_0,dtype $\equiv$ torch.float)) # (1,) self.c_1 = nn.Parameter(torch.tensor(c_1,dtype $\equiv$ torch.float)) # (1,) self.c_2 = nn.Parameter(torch.tensor(c_2,dtype $\equiv$ torch.float)) # (1,) self.a = nn.Parameter(torch.tensor(a,dtype $\equiv$ torch.float)) # (1,) self.b = nn.Parameter(torch.tensor(b,dtype $\equiv$ torch.float)) # (1,) self.d = nn.Parameter(torch.tensor(d,dtype $\equiv$ torch.float)) # (1,) +def forward(self,epsilon:torch.Tensor,T:torch.Tensor) -> torch.Tensor:"""Mathematical function for Stress +Args: epsilon:Strain.#(B,1) T:Temperature.#(B,1) +Return: sigma:Stress.#(B,1) """ # (B,1) sigma $=$ self.c_0 $^+$ self.c_1 \* torch.exp(self.a \* epsilon $^+$ self.b \*T)+ self.c_2 \* torch.cos(self.d \* epsilon \*T) return sigma +``` + +c) Predicted: LaSR (Llama-3.1-8b) + +$$ +\left(\left(\sqrt {\epsilon} \cdot (- 7 1 2. 
3 6) + \left(\left(\frac {\sqrt {\epsilon}}{1 . 3 7 9 2 ^ {\epsilon}} \cdot 2. 2 7 9 8\right) \cdot (T - \epsilon) + \epsilon\right) + 6. 8 1 2 5\right) \cdot 1. 5 0 7 6 ^ {\sqrt {\epsilon}}\right) - \sin (\log (T)) +$$ + +d) Predicted: LLM-SR (Llama-3.1-8b) +```python +def equation(epsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray: + "" Mathematical function for Stress +Args: + epsilon: A numpy array representing observations of Strain. + T: A numpy array representing observations of Temperature. + params: Array of numeric constants or parameters to be optimized +Return: + A numpy array representing Stress as the result of applying the mathematical function to the inputs. +>>> output = params[0] * (epsilon ** (1/params[4]) - 1) + \ + params[1] * (epsilon ** (1/params[6]) - 1) + \ + params[2] * (epsilon ** (1/params[8]) - 1) * np.exp.params[3] * epsilon) + \ + params[5] * epsilon + params[6] * T + params[7] +return output +``` + +Figure 16. Example of output hypotheses from different LLM scientific equation discovery methods for MatSci0 problem in LSR-Synth material science domain. + +Ground Truth: + +$$ +F _ {0} \cdot \sin (t) - \beta \cdot \sin (v (t)) - \omega_ {0} ^ {2} \cdot x (t) ^ {3} - \omega_ {0} ^ {2} \cdot x (t) \cdot e ^ {- | x (t) |} +$$ + +# a) Predicted: Direct (Llama-3.1-8b) + +def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray: + +""Mathematical function for Acceleration + +Args: + +x: A numpy array representing observations of Position at time t. + +t: A numpy array representing observations of Time. + +v: A numpy array representing observations of Velocity at time t. + +params: Array of numeric constants or parameters to be optimized + +Return: + +A numpy array representing Acceleration as the result of applying the mathematical function to the inputs. 
+ +1 + +Since acceleration is Force/mass and Force is mass*velocity change, + +we represent this equation as + +a = dv/dt or in our representation format, given our position x it will correspond + +a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions: + +output = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + params[4]) + params[5]*v**2 + params[6]*t + params[7]*(x**2 + v) + +return output + +# b) Predicted: SGA (Llama-3.1-8b) + +```txt +class SymbolicEquation(nnModule): def init(self,c_0: float $= 9.50\mathrm{e} - 01$ ,c_1: float $= 9.50\mathrm{e} - 01$ ,c_2: float $= 9.50\mathrm{e} - 01$ ,c_3: float $= 1.05\mathrm{e} + 00$ : +``` + +Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. + +Args: + +c_0(float): coefficient. + +c 1(float): coefficient. + +c_2(float); coefficient. + +c 3 (float): coefficient. + +1 1 + +super().__init_(self.c 0 = nn.Parameter(torch.tensor(c 0, dtype=torch.float)) # (1,) + +self.c1 = nn_PARAMETER(torch.tensor(c1, dtype=torch.float)) # (1,) + +self.c_2 = nn_PARAMETER(torch:tensor(c_2, dtype=torch.float)) # (1,) + +self.c_3 = nn_PARAMETER(torch.tensor(c_3, dtype=torch.float)) # (1,) + +def forward(self, x: torch.Tensor, t: torch.Tensor, v: torch.Tensor) -> torch.Tensor: + +""Mathematical function for Acceleration in Nonl-linear Harmonic Oscillator + +Args: + +x: Position at time t. # (B,) + +t: Time. # (B,) + +v: Velocity at time t. # (B,) + +Return: + +dv dt: Acceleration in Nonl-linear Harmonic Oscillator. # (B,) + +1 1 + +Non-linear relationship between x, t, and v + +dv_dt = self.c_0 * torch.exp(-self.c_1 * x) + self.c_2 * torch.cos(self.c_3 * t + self.c_3 * x) # (B, ) + +return dv_dt + +c) Predicted: LaSR (Llama-3.1-8b) + +$$ +\left(\frac {\left((x + x) \cdot \left(\left(\frac {\sqrt {x ^ {\mathrm {l i n e s}} - \frac {x}{\sin x}}}{x}\right) \cdot \sin (1 . 
1 4 7 8 \cdot t) - x\right) \cdot \sin (x)\right)}{1 . 7 0 5 2} - \sin (0. 0 0 3 2 8 2 7)\right) +$$ + +d) Predicted: LLM-SR (Llama-3.1-8b) + +```python +def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray: + "" Mathematical function for Acceleration +``` + +Args: + +```txt +x: A numpy array representing observations of Position at time t. +t: A numpy array representing observations of Time. +v: A numpy array representing observations of Velocity at time t. +params: Array of numeric constants or parameters to be optimized +``` + +Return: + +```python +A numpy array representing Acceleration as the result of applying the mathematical function to the inputs. +```python +``` +# Since acceleration is Force/mass and Force is mass*velocity change, +# we represent this equation as +# a = dv/dt or in our representation format, given our position x it will correspond to +# a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions: +output = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + params[4]) ++ params[5]*v**2 + params[6]*t + params[7]*(x**2 + v) +return output +``` + +Figure 17. Example of output hypotheses from different LLM scientific equation discovery methods for PO0 problem in LSR-Synth physics domain. 
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10415/images/053f4e582db53b49c4f4556f3f98b2f4911c4a84b86aec09383eaac4c74971e9.jpg b/data/2025/2504_10xxx/2504.10415/images/053f4e582db53b49c4f4556f3f98b2f4911c4a84b86aec09383eaac4c74971e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11199f85ffaf339994ee56b8d37c765624491327 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/053f4e582db53b49c4f4556f3f98b2f4911c4a84b86aec09383eaac4c74971e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1933afe2f4bd8acc6f6baded2d9f4a120e7f47d9514a175e90144d0025cdf2a3 +size 31301 diff --git a/data/2025/2504_10xxx/2504.10415/images/091c70ec5781ab888053104a16b510b8c2079a192b34634e9d1bd7774e541196.jpg b/data/2025/2504_10xxx/2504.10415/images/091c70ec5781ab888053104a16b510b8c2079a192b34634e9d1bd7774e541196.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d963b06e0e68f5ec29a5a046725a27a278d745e3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/091c70ec5781ab888053104a16b510b8c2079a192b34634e9d1bd7774e541196.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b050e3d072d237be85ce422e30c74cee29c678e47cf819b19001133e7ae41c1 +size 4811 diff --git a/data/2025/2504_10xxx/2504.10415/images/0c00954c33b422695f0cf22b16af7ff62ba75f2ae8dbc0e8c786e1780fa18f1b.jpg b/data/2025/2504_10xxx/2504.10415/images/0c00954c33b422695f0cf22b16af7ff62ba75f2ae8dbc0e8c786e1780fa18f1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25b5156bf196ff1b21566ed8ff4d2475c05aab93 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/0c00954c33b422695f0cf22b16af7ff62ba75f2ae8dbc0e8c786e1780fa18f1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1143945d65d7edac3489401028092c5c6126c9d073141a9b02f25d0056f1beb +size 126819 diff --git 
a/data/2025/2504_10xxx/2504.10415/images/16fbc11c1ac7574b7d1a59cd72413c0b5c055e02f015f88a1e7a9732b7b601d7.jpg b/data/2025/2504_10xxx/2504.10415/images/16fbc11c1ac7574b7d1a59cd72413c0b5c055e02f015f88a1e7a9732b7b601d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e807e92f4fef5e2a3cc48af8997aceeea913c4a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/16fbc11c1ac7574b7d1a59cd72413c0b5c055e02f015f88a1e7a9732b7b601d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57f8037d2856057a43568d179529e1aa4d58052f65bcb1be1ca711df857fe408 +size 1308 diff --git a/data/2025/2504_10xxx/2504.10415/images/17beb428c63f2aabd0dfe88d4ab8d11372d076979bffbc2f467162f6659a2623.jpg b/data/2025/2504_10xxx/2504.10415/images/17beb428c63f2aabd0dfe88d4ab8d11372d076979bffbc2f467162f6659a2623.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d9058ce5dbe77b6e998841177fb04d94d038e56 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/17beb428c63f2aabd0dfe88d4ab8d11372d076979bffbc2f467162f6659a2623.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef0ff069feb5b952ede5e5c521f97c89381a5d3397050a18d048308e488a8d6c +size 861 diff --git a/data/2025/2504_10xxx/2504.10415/images/1ea4c0e7fbb3e4c96f6c61b6448ed3db9ac0b6e1e3237c10a796be8369c6e18f.jpg b/data/2025/2504_10xxx/2504.10415/images/1ea4c0e7fbb3e4c96f6c61b6448ed3db9ac0b6e1e3237c10a796be8369c6e18f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3bae6432c39319d54a3d7e91b934956ebbd1377a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/1ea4c0e7fbb3e4c96f6c61b6448ed3db9ac0b6e1e3237c10a796be8369c6e18f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50560bb2e1e556624bcffda0b92f394cdbc241237eca979eb3fe9f31fc803916 +size 5734 diff --git a/data/2025/2504_10xxx/2504.10415/images/1ef59f871e9e9d07a925f7b0228547eee64a77dbbb598caf0d6216087b8b2bb6.jpg 
b/data/2025/2504_10xxx/2504.10415/images/1ef59f871e9e9d07a925f7b0228547eee64a77dbbb598caf0d6216087b8b2bb6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82678ccde2d7eea5d896e15540e1531e29c34978 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/1ef59f871e9e9d07a925f7b0228547eee64a77dbbb598caf0d6216087b8b2bb6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64a9744466ea33ee222e09e2a3ce50aab2ed6fcd815adcc558da378cb05ff8e0 +size 32611 diff --git a/data/2025/2504_10xxx/2504.10415/images/221b1498b3f8e47e6bf90ed1d149435f76b18381710e3d5b0115980fca2e9cd3.jpg b/data/2025/2504_10xxx/2504.10415/images/221b1498b3f8e47e6bf90ed1d149435f76b18381710e3d5b0115980fca2e9cd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d2cbb35074aa922925b03c40a1d9020f9615292 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/221b1498b3f8e47e6bf90ed1d149435f76b18381710e3d5b0115980fca2e9cd3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee1d7969eee9938f1adbec3472c39cc3f9116b883ae166f02132548bbf97bb0f +size 63314 diff --git a/data/2025/2504_10xxx/2504.10415/images/221c6db0b8a670b721bdaf264013975ba34263b1f7cc7743cc6dd9d9435c6652.jpg b/data/2025/2504_10xxx/2504.10415/images/221c6db0b8a670b721bdaf264013975ba34263b1f7cc7743cc6dd9d9435c6652.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6115c501704cf19e6228eb8e7bb6a43b68b0dabd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/221c6db0b8a670b721bdaf264013975ba34263b1f7cc7743cc6dd9d9435c6652.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca916a37f8bf428f8e17c0f07d9b4198dfc9669c26527ae625ec644d966dfeaa +size 4809 diff --git a/data/2025/2504_10xxx/2504.10415/images/2943312887403cbd3614ec26d86312c9ed3797876e4b24a507d8b8ccaf08be34.jpg b/data/2025/2504_10xxx/2504.10415/images/2943312887403cbd3614ec26d86312c9ed3797876e4b24a507d8b8ccaf08be34.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..906a4e9020acacda0aa589aede31c8a204b9d396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/2943312887403cbd3614ec26d86312c9ed3797876e4b24a507d8b8ccaf08be34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bef99def82cf50f4481ec7c151c5f24aa9d1b575bbc48ff8911982c63c177c3 +size 1161 diff --git a/data/2025/2504_10xxx/2504.10415/images/29b608f1ad835a7b4b3cf0055c416cc691294f5fa75a973e1ef9650ca2acd5dc.jpg b/data/2025/2504_10xxx/2504.10415/images/29b608f1ad835a7b4b3cf0055c416cc691294f5fa75a973e1ef9650ca2acd5dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64f43db5d41fddd580cffb57331edfd4f04e346d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/29b608f1ad835a7b4b3cf0055c416cc691294f5fa75a973e1ef9650ca2acd5dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cac8653e11f96f60750ec3c82210c4e48f868ab6e4876d3f9e1df92174cdc09 +size 32509 diff --git a/data/2025/2504_10xxx/2504.10415/images/2adc38a0dbf93a061dc77164de6fb9a1f18be7dd91e3638892c85d532316150b.jpg b/data/2025/2504_10xxx/2504.10415/images/2adc38a0dbf93a061dc77164de6fb9a1f18be7dd91e3638892c85d532316150b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c77e6f9749369564d09f987b613f8c66c00af386 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/2adc38a0dbf93a061dc77164de6fb9a1f18be7dd91e3638892c85d532316150b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8eaebcf91718499d0dcaf2834434982411523f27ee42a19e36ef4a62a54b478 +size 4071 diff --git a/data/2025/2504_10xxx/2504.10415/images/31cc595c0702c239d43e0439b58294dc4ed053a8c0e2ce359af8d7154d769573.jpg b/data/2025/2504_10xxx/2504.10415/images/31cc595c0702c239d43e0439b58294dc4ed053a8c0e2ce359af8d7154d769573.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80f7b8835271ae3103b19e6a23c2b3fc01322a4c --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10415/images/31cc595c0702c239d43e0439b58294dc4ed053a8c0e2ce359af8d7154d769573.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c42a77e914d04cf8917d7bb75b39203c8cc26434b8a6995965dd4237d2176d4 +size 5935 diff --git a/data/2025/2504_10xxx/2504.10415/images/396f55c66c8f2289a25537401b85bf1309aa6f76d7a756db809edb8483380734.jpg b/data/2025/2504_10xxx/2504.10415/images/396f55c66c8f2289a25537401b85bf1309aa6f76d7a756db809edb8483380734.jpg new file mode 100644 index 0000000000000000000000000000000000000000..949c45a77d29100b4b765de165cd1b2388fb65c0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/396f55c66c8f2289a25537401b85bf1309aa6f76d7a756db809edb8483380734.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98bc775f5f2abda160ddd94d7ff2e0cfd254fb0d0b5ba7b9c12f784e216fa601 +size 13975 diff --git a/data/2025/2504_10xxx/2504.10415/images/39fd3cc32845900a08aae36a6154ecdf290fd268ff7603ccbdc4f582bcc25f59.jpg b/data/2025/2504_10xxx/2504.10415/images/39fd3cc32845900a08aae36a6154ecdf290fd268ff7603ccbdc4f582bcc25f59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a98c1d580f2ded1cefa138968b4334356d515bd1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/39fd3cc32845900a08aae36a6154ecdf290fd268ff7603ccbdc4f582bcc25f59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ea59a4c248518884cb39f262319846c8c1e780f9d332fd9a7d4624ca82bbc14 +size 324480 diff --git a/data/2025/2504_10xxx/2504.10415/images/3d6accc6d6374c2b7e89a67dfa6fd230f75a652a635a6c19664eb439522c7b18.jpg b/data/2025/2504_10xxx/2504.10415/images/3d6accc6d6374c2b7e89a67dfa6fd230f75a652a635a6c19664eb439522c7b18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9606f09f3a248b3972a65109e247fa38790ecd8a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/3d6accc6d6374c2b7e89a67dfa6fd230f75a652a635a6c19664eb439522c7b18.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:98c986df92dc94677fabf39a544a4bafd7ebadf6baa40659f87cbe776535481b +size 7295 diff --git a/data/2025/2504_10xxx/2504.10415/images/4336f60ac23a31b7303fd2bcd302d1a07db5ad0fb5084d34cb24e039edff4412.jpg b/data/2025/2504_10xxx/2504.10415/images/4336f60ac23a31b7303fd2bcd302d1a07db5ad0fb5084d34cb24e039edff4412.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c832a113696ce417a5f578986ed058e3102da03 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/4336f60ac23a31b7303fd2bcd302d1a07db5ad0fb5084d34cb24e039edff4412.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62bbdf6443606afd2ef0e3bf51053ddda9a9f5999111c40dadf3013355c0239c +size 998 diff --git a/data/2025/2504_10xxx/2504.10415/images/44091b3e8c1539821af5a09eee700b277a69b0b6828f6d77a6631b20dba9bcac.jpg b/data/2025/2504_10xxx/2504.10415/images/44091b3e8c1539821af5a09eee700b277a69b0b6828f6d77a6631b20dba9bcac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c196db15667a2da63865eff870b822783eca0c29 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/44091b3e8c1539821af5a09eee700b277a69b0b6828f6d77a6631b20dba9bcac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e8e952e2c1fd66c2e486d8ab949e7ad378da935066c92c1e0168d30da755546 +size 6871 diff --git a/data/2025/2504_10xxx/2504.10415/images/45452e4a8a849977e88075f5d626da8a4d10baf79ead0ecba98829cb4fc616f2.jpg b/data/2025/2504_10xxx/2504.10415/images/45452e4a8a849977e88075f5d626da8a4d10baf79ead0ecba98829cb4fc616f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c812e4f0bf17780e076e962dbb171720725d250 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/45452e4a8a849977e88075f5d626da8a4d10baf79ead0ecba98829cb4fc616f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06820d1b997a39d0958a03ac2e29c9f150d592b613f460d806e2772855023e53 +size 8013 diff --git 
a/data/2025/2504_10xxx/2504.10415/images/45adbee8894555e3106f5a2dee37d6404abf1849e69eefb9c0dccc433661da59.jpg b/data/2025/2504_10xxx/2504.10415/images/45adbee8894555e3106f5a2dee37d6404abf1849e69eefb9c0dccc433661da59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e5d144be20905100f2959866cf90c7ae33c8968 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/45adbee8894555e3106f5a2dee37d6404abf1849e69eefb9c0dccc433661da59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ddf5fbd2eed2c814d99578864ca5f5c011133639e65a4e64162f1af1bddfb32 +size 5621 diff --git a/data/2025/2504_10xxx/2504.10415/images/49bbe1d88c7d1016d1299e0446c3a6af030be9f0f4775174147f6c3751655ae0.jpg b/data/2025/2504_10xxx/2504.10415/images/49bbe1d88c7d1016d1299e0446c3a6af030be9f0f4775174147f6c3751655ae0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89402832aa1a67aa55767829ff50a0db15733caf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/49bbe1d88c7d1016d1299e0446c3a6af030be9f0f4775174147f6c3751655ae0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ca296f3c12a8879def16c3997d53d81b1823e56f8053d3057e741ee4ce638d4 +size 296256 diff --git a/data/2025/2504_10xxx/2504.10415/images/4c620bd69fe9f710128e1e41b30a46b9f571f0d7ea8754a51132638fc907b0c9.jpg b/data/2025/2504_10xxx/2504.10415/images/4c620bd69fe9f710128e1e41b30a46b9f571f0d7ea8754a51132638fc907b0c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecaaaeb78c9016f8f53a2a1307e89c293068d1c1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/4c620bd69fe9f710128e1e41b30a46b9f571f0d7ea8754a51132638fc907b0c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6251bf8f4ee1487e0a9ebf56dcbd66c347c7c20cb2c4ad0c8065e03142362cb7 +size 72953 diff --git a/data/2025/2504_10xxx/2504.10415/images/4f27e9f084032a2c13ab16f8039019f7fcbfd78a412426da76d8a73012a4b88b.jpg 
b/data/2025/2504_10xxx/2504.10415/images/4f27e9f084032a2c13ab16f8039019f7fcbfd78a412426da76d8a73012a4b88b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..391c958b55ae57b721d599a6f4966ed9ce79e1d1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/4f27e9f084032a2c13ab16f8039019f7fcbfd78a412426da76d8a73012a4b88b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c0db15aed0a41e1bdd661ba6af4ba5cf35abc5923226291c994bcda70524d9f +size 261359 diff --git a/data/2025/2504_10xxx/2504.10415/images/567618e539e7aaa3e227f4a99d09b32c252bb82a09a58ea4265eb74d2ca07490.jpg b/data/2025/2504_10xxx/2504.10415/images/567618e539e7aaa3e227f4a99d09b32c252bb82a09a58ea4265eb74d2ca07490.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f325477a3d605c7c87cfd61251d00fee5644e7c5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/567618e539e7aaa3e227f4a99d09b32c252bb82a09a58ea4265eb74d2ca07490.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eaf2f513e6247ac9ba3c61a9de39fba57fbec950241b78b11746f9578f24304c +size 6180 diff --git a/data/2025/2504_10xxx/2504.10415/images/59a6b36bbcf629cb69397918b9c2aebc5072035379d941e42bc976b701aebb85.jpg b/data/2025/2504_10xxx/2504.10415/images/59a6b36bbcf629cb69397918b9c2aebc5072035379d941e42bc976b701aebb85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75a220453923469f774e374d6778102d7e8373a1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/59a6b36bbcf629cb69397918b9c2aebc5072035379d941e42bc976b701aebb85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b2e3bd208a4a8946a1d61fa481e89fd490d0b10c07e41741fe3c5a7efb120ab +size 4041 diff --git a/data/2025/2504_10xxx/2504.10415/images/70967ca904be5f1f1a8b9f4dfc8cd75b9db6b10b25fae22dcfd2b1d0e13189dd.jpg b/data/2025/2504_10xxx/2504.10415/images/70967ca904be5f1f1a8b9f4dfc8cd75b9db6b10b25fae22dcfd2b1d0e13189dd.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c66be953695fa97fbb740445304f77713268db15 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/70967ca904be5f1f1a8b9f4dfc8cd75b9db6b10b25fae22dcfd2b1d0e13189dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c0b0cd90097bb5ab3a9092142e7f544fecfd3105f724396cd28e69338cb444e +size 6432 diff --git a/data/2025/2504_10xxx/2504.10415/images/7287530c601c8f7a06f0551faaeaa9113407b59a179cd6bf6b36c8bedb772eb5.jpg b/data/2025/2504_10xxx/2504.10415/images/7287530c601c8f7a06f0551faaeaa9113407b59a179cd6bf6b36c8bedb772eb5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8469b6ad9ef44495e03cbf7dac17ce040995038d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/7287530c601c8f7a06f0551faaeaa9113407b59a179cd6bf6b36c8bedb772eb5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4beec21cb2bdf6e4bbdbd3d8cf89bc441c7d0c7802e839c4dcc563aceb156c58 +size 35484 diff --git a/data/2025/2504_10xxx/2504.10415/images/76b6590aa51f2645dd5819d5bfe3ca1e5f13e79490781dd4e4338503b9cb123c.jpg b/data/2025/2504_10xxx/2504.10415/images/76b6590aa51f2645dd5819d5bfe3ca1e5f13e79490781dd4e4338503b9cb123c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..245eecc4954a7e128e93ff11c3d442ad95b30ead --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/76b6590aa51f2645dd5819d5bfe3ca1e5f13e79490781dd4e4338503b9cb123c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d08f0869ec6990c50555ca698fa3393991ae1b7ece4ea08c7bedc41b09b74e99 +size 123264 diff --git a/data/2025/2504_10xxx/2504.10415/images/7967b1fc36cb281ebaa90a39635f07a107d1c6c1634b17b707e0936933c15249.jpg b/data/2025/2504_10xxx/2504.10415/images/7967b1fc36cb281ebaa90a39635f07a107d1c6c1634b17b707e0936933c15249.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd38819f3a8f0e8877c3218c19cdd90128513fc7 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10415/images/7967b1fc36cb281ebaa90a39635f07a107d1c6c1634b17b707e0936933c15249.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67962c8de3cf5a4f83170b3d88f0105c0e840c1544360fad34c7965316a18130 +size 9816 diff --git a/data/2025/2504_10xxx/2504.10415/images/829ccf7d250536ce02ae636e646d148b83b71780115b434595a8856f534f2c26.jpg b/data/2025/2504_10xxx/2504.10415/images/829ccf7d250536ce02ae636e646d148b83b71780115b434595a8856f534f2c26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49642acfaa6b7ac64d94e05594109dc5f9798b24 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/829ccf7d250536ce02ae636e646d148b83b71780115b434595a8856f534f2c26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d34576c492cc353872985faa47211b5b834e5483fce41ea4e07d2cf357baddb9 +size 10689 diff --git a/data/2025/2504_10xxx/2504.10415/images/83fdb943052af6819deb8c9f9e526ad919dcda5736b655b45a9e19dc080f02a4.jpg b/data/2025/2504_10xxx/2504.10415/images/83fdb943052af6819deb8c9f9e526ad919dcda5736b655b45a9e19dc080f02a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e946e1e480d462d4d9b404cefac00a58c89ca625 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/83fdb943052af6819deb8c9f9e526ad919dcda5736b655b45a9e19dc080f02a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ec6ec1b613cae14a7d012f35ac2edaee701570f790f35f64dd613802643dca2 +size 1100 diff --git a/data/2025/2504_10xxx/2504.10415/images/894c6bbe436e91e344a48f1b4afa6678514260a01ee7aec8ebbf71e5ed22ed03.jpg b/data/2025/2504_10xxx/2504.10415/images/894c6bbe436e91e344a48f1b4afa6678514260a01ee7aec8ebbf71e5ed22ed03.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c21f4931789242bcd4150269bb3fcba4ef9f47ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/894c6bbe436e91e344a48f1b4afa6678514260a01ee7aec8ebbf71e5ed22ed03.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cfac0eae51ec74427cfe952529b5f39c1923a2b1982fc75a042aab1911db4769 +size 60421 diff --git a/data/2025/2504_10xxx/2504.10415/images/8ba728d78f2919727c08b3690d02bb7aec14be00772e7730233d78780cba6800.jpg b/data/2025/2504_10xxx/2504.10415/images/8ba728d78f2919727c08b3690d02bb7aec14be00772e7730233d78780cba6800.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94c8bc08f23cd73379a8c14afd799cc594538972 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/8ba728d78f2919727c08b3690d02bb7aec14be00772e7730233d78780cba6800.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a98d77a0f902d00e69bb09ddf7a908be804423d5498d49696c00ab7d5ecd02f6 +size 101507 diff --git a/data/2025/2504_10xxx/2504.10415/images/94c039b1cedc695be2873c2dcb0437ad37c78b74f2f5c36f13aca7b21cf0c837.jpg b/data/2025/2504_10xxx/2504.10415/images/94c039b1cedc695be2873c2dcb0437ad37c78b74f2f5c36f13aca7b21cf0c837.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47b5d59d914c4469f9cce9de3d08601f2af72efc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/94c039b1cedc695be2873c2dcb0437ad37c78b74f2f5c36f13aca7b21cf0c837.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c190b70f3b6a6a8e63fac7a64c18db63ac55e2d251d794be6ce9017493ac60f3 +size 1284 diff --git a/data/2025/2504_10xxx/2504.10415/images/957e0d6df1af58f9ace00db895f47d3f06af9fc2946b94c227d5a134b99afa32.jpg b/data/2025/2504_10xxx/2504.10415/images/957e0d6df1af58f9ace00db895f47d3f06af9fc2946b94c227d5a134b99afa32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..967c263240701d28b0483f1cf920690087529fd2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/957e0d6df1af58f9ace00db895f47d3f06af9fc2946b94c227d5a134b99afa32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f102e9573dee9039f5d966946f30b54592b6bcd830f4f3f8d9529b710941fda +size 4344 diff --git 
a/data/2025/2504_10xxx/2504.10415/images/980926fc19c4a5dbe8a321ba11ada09393daa23e399bacfb2e5b2bb0db7cc03b.jpg b/data/2025/2504_10xxx/2504.10415/images/980926fc19c4a5dbe8a321ba11ada09393daa23e399bacfb2e5b2bb0db7cc03b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00eeb7e1209a121537d6e15e03a2deb76b46d0ca --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/980926fc19c4a5dbe8a321ba11ada09393daa23e399bacfb2e5b2bb0db7cc03b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d69b709e93e85208368c25df92048bdb5702601e5cf7c47cc396ad47cc5de95 +size 38649 diff --git a/data/2025/2504_10xxx/2504.10415/images/9f909e31eff9f4ce76eed70ce90bc2bb14486252d0d4d100e27f4576d9575cab.jpg b/data/2025/2504_10xxx/2504.10415/images/9f909e31eff9f4ce76eed70ce90bc2bb14486252d0d4d100e27f4576d9575cab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b578ff5fb45c40024286f5247be1f6eb23a443a6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/9f909e31eff9f4ce76eed70ce90bc2bb14486252d0d4d100e27f4576d9575cab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ded3ae659d545af83324a5b5058d72f2a1e7c3a5160f0cbb3b0bb9cb53147cc0 +size 144047 diff --git a/data/2025/2504_10xxx/2504.10415/images/a2d3aa7e47027c5fc32383269784b60c4e30210c9a1ddb79ea5b1ae367be0d02.jpg b/data/2025/2504_10xxx/2504.10415/images/a2d3aa7e47027c5fc32383269784b60c4e30210c9a1ddb79ea5b1ae367be0d02.jpg new file mode 100644 index 0000000000000000000000000000000000000000..181e180f4fe355c656d6959a616a48e55e04ba39 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/a2d3aa7e47027c5fc32383269784b60c4e30210c9a1ddb79ea5b1ae367be0d02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce018bb7e083ca9e14661dfd8d1c8d44a0d7ba4bc32772a45fd29b0425fbfd0b +size 1113 diff --git a/data/2025/2504_10xxx/2504.10415/images/a7c6d759afe9fd77afb493d4e3d1f4cb1fd3debd494f956b9c7e6f32ff6755e4.jpg 
b/data/2025/2504_10xxx/2504.10415/images/a7c6d759afe9fd77afb493d4e3d1f4cb1fd3debd494f956b9c7e6f32ff6755e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2156921596d00c2708f330425c559078df201ca7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/a7c6d759afe9fd77afb493d4e3d1f4cb1fd3debd494f956b9c7e6f32ff6755e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90a08e1f765cd6c988b4afeb09b22af77382c8cf3e6439c6e216341ef5b603fa +size 10110 diff --git a/data/2025/2504_10xxx/2504.10415/images/ad8758eeed7443f052d56ef193c8c3f87f7853ba79964be79566c78337c2fb16.jpg b/data/2025/2504_10xxx/2504.10415/images/ad8758eeed7443f052d56ef193c8c3f87f7853ba79964be79566c78337c2fb16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..854264467214ae761b7108c29006263146525583 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/ad8758eeed7443f052d56ef193c8c3f87f7853ba79964be79566c78337c2fb16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:172a30eac1ebc3e586b27397f9742c7e2fb8088b71242bf4de0e7bd1ef228dd2 +size 7657 diff --git a/data/2025/2504_10xxx/2504.10415/images/b097fbf8c4d56eba060df42ecd3b49838fa4ada765fd0915a1a5c18f2c0c67bf.jpg b/data/2025/2504_10xxx/2504.10415/images/b097fbf8c4d56eba060df42ecd3b49838fa4ada765fd0915a1a5c18f2c0c67bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2896523b56e49e40abe9a997865c9263b49f9011 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/b097fbf8c4d56eba060df42ecd3b49838fa4ada765fd0915a1a5c18f2c0c67bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6242d275bf68295d9deafda6d1169fe804a9f1b95b17eb87e2650eba566ebebb +size 4538 diff --git a/data/2025/2504_10xxx/2504.10415/images/b2f081c94fa3b127bfd848223e598ebab8dca7669779b99dbbb482707e9719d8.jpg b/data/2025/2504_10xxx/2504.10415/images/b2f081c94fa3b127bfd848223e598ebab8dca7669779b99dbbb482707e9719d8.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6e53685273229e88c6e7b84b5c3e5489c85a5be5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/b2f081c94fa3b127bfd848223e598ebab8dca7669779b99dbbb482707e9719d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68cb50d06ccdb12b9fc0511852aeb80bbd8d1839ccd7b07074c187c98774756a +size 1230 diff --git a/data/2025/2504_10xxx/2504.10415/images/b4248be90e578b449703e07ee0bf472829ee92a629bf290e7b43622f53874a51.jpg b/data/2025/2504_10xxx/2504.10415/images/b4248be90e578b449703e07ee0bf472829ee92a629bf290e7b43622f53874a51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab0ea3acad35241c8a86a10f249fe464557943de --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/b4248be90e578b449703e07ee0bf472829ee92a629bf290e7b43622f53874a51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe79aedc6d8e160c7e2615b51d3dce844a01df7974159d04ba5eae381e849387 +size 1199 diff --git a/data/2025/2504_10xxx/2504.10415/images/bb81ea4b5f5d5a4f975ff0466f5ed65eeae3a9584feb539e69d440e2f25d1471.jpg b/data/2025/2504_10xxx/2504.10415/images/bb81ea4b5f5d5a4f975ff0466f5ed65eeae3a9584feb539e69d440e2f25d1471.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cbd342f7ec6184476737ee92928adf8d9872cb84 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/bb81ea4b5f5d5a4f975ff0466f5ed65eeae3a9584feb539e69d440e2f25d1471.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7201dae45ddb4b49c485cd126c7d3b395bb11cf77ea524bba55daa8ac2fc26b8 +size 1027 diff --git a/data/2025/2504_10xxx/2504.10415/images/ca65ea72e3598758183c47b1c9383295ef13fd6f3425058eab5ecd3109f15d85.jpg b/data/2025/2504_10xxx/2504.10415/images/ca65ea72e3598758183c47b1c9383295ef13fd6f3425058eab5ecd3109f15d85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb2ca0c27bbc0eddd91a098e41b0e6b07167cdba --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10415/images/ca65ea72e3598758183c47b1c9383295ef13fd6f3425058eab5ecd3109f15d85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e568a0c45d54eb504cc81c51c4cc61519184563ca7209e7a0c92387dc6ae3095 +size 14233 diff --git a/data/2025/2504_10xxx/2504.10415/images/cf56e5983e34100141acb2fa8afb0f7cc65db6cfde1062c5930415f9581960f8.jpg b/data/2025/2504_10xxx/2504.10415/images/cf56e5983e34100141acb2fa8afb0f7cc65db6cfde1062c5930415f9581960f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0dc7b2edef7c68e7c7fbf4b58f6a0a619fe594e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/cf56e5983e34100141acb2fa8afb0f7cc65db6cfde1062c5930415f9581960f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8eb0a5fc4f279fa5096427a8740fadd1a65d5032ba03994e83ade5361bdb4ac +size 3506 diff --git a/data/2025/2504_10xxx/2504.10415/images/d03139fa8f37c0317378e921fd4d64d969658d30eed59db871fb79cadd984a89.jpg b/data/2025/2504_10xxx/2504.10415/images/d03139fa8f37c0317378e921fd4d64d969658d30eed59db871fb79cadd984a89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efeecbb866aa53fa019332fb1232d44c4e8acf7b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/d03139fa8f37c0317378e921fd4d64d969658d30eed59db871fb79cadd984a89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:965f1dad14574a9c5b83f63f74f4d5b06e340dd07ccfddb7ea916f1a8a97bf8a +size 4417 diff --git a/data/2025/2504_10xxx/2504.10415/images/db1376c52674cb5c8982ddceb32ff22a3f8f8dbaf2c818ad9838ab43ad246a39.jpg b/data/2025/2504_10xxx/2504.10415/images/db1376c52674cb5c8982ddceb32ff22a3f8f8dbaf2c818ad9838ab43ad246a39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87524ea6db39ff75e28b4267361fca1e97c525b2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/db1376c52674cb5c8982ddceb32ff22a3f8f8dbaf2c818ad9838ab43ad246a39.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:fa4ce69d8f0bd8edcd3961a5bbf579af01d9e0c72f6a2b06cdcaf670e1c4ff14 +size 10442 diff --git a/data/2025/2504_10xxx/2504.10415/images/dd4ace02bfdbbc327dd22c6ca51c918e6611e6e1a0a6dffb9ff8c4f2e067743e.jpg b/data/2025/2504_10xxx/2504.10415/images/dd4ace02bfdbbc327dd22c6ca51c918e6611e6e1a0a6dffb9ff8c4f2e067743e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77545c2931fa5f11bc8be128b104a04e711fe299 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/dd4ace02bfdbbc327dd22c6ca51c918e6611e6e1a0a6dffb9ff8c4f2e067743e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a323fadc51ee95b43ad23f406b3b52917f841460f1848befd131bcf1c8a5d33 +size 1107 diff --git a/data/2025/2504_10xxx/2504.10415/images/deefd5b634e32daa868eb85f29a5385e1c0ea66ff650356d310d88c0ea387d68.jpg b/data/2025/2504_10xxx/2504.10415/images/deefd5b634e32daa868eb85f29a5385e1c0ea66ff650356d310d88c0ea387d68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82e3446de28fc9dccf136febdc4e759e8913fc5a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/deefd5b634e32daa868eb85f29a5385e1c0ea66ff650356d310d88c0ea387d68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58b72443c799650dca75f4f7f0befbb1d5f41147e3d6b631a8dadc6a9623a917 +size 7901 diff --git a/data/2025/2504_10xxx/2504.10415/images/e0d63bd33ab92d526518753edc49274d6d59f20397bf8d0354c580b9853762fa.jpg b/data/2025/2504_10xxx/2504.10415/images/e0d63bd33ab92d526518753edc49274d6d59f20397bf8d0354c580b9853762fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0403a7436b9c71571c21a63fcf1e8664a58cae83 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/e0d63bd33ab92d526518753edc49274d6d59f20397bf8d0354c580b9853762fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a394b41f10767c20d04c0e2cc34d8708351b29f09625f6992da6de02fb4ccd54 +size 8885 diff --git 
a/data/2025/2504_10xxx/2504.10415/images/e19abcc9793b69233bc2068d01085943b55800e6c5368956853e58e8c2357a1e.jpg b/data/2025/2504_10xxx/2504.10415/images/e19abcc9793b69233bc2068d01085943b55800e6c5368956853e58e8c2357a1e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65f491fbe93f690ed201213f4b3319ab5e716643 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/e19abcc9793b69233bc2068d01085943b55800e6c5368956853e58e8c2357a1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b5c89f274940ab8ccb8bae20215bf908c7a2132fe37f7e17a2eeb7183d2df1c +size 4104 diff --git a/data/2025/2504_10xxx/2504.10415/images/e6d31a599f2e56fc3e8bcf784ee3f05f71964409e21e6a4b52c86a09aa0d91e6.jpg b/data/2025/2504_10xxx/2504.10415/images/e6d31a599f2e56fc3e8bcf784ee3f05f71964409e21e6a4b52c86a09aa0d91e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cbb68ab49bd4c742da18cc12799560acff3ba7e5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/e6d31a599f2e56fc3e8bcf784ee3f05f71964409e21e6a4b52c86a09aa0d91e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aa6e9992fe9bfafe1f54ae734eb7d0d988609d39e09e2ca5d849ecdf3ca99c5 +size 15046 diff --git a/data/2025/2504_10xxx/2504.10415/images/e74e4296a0117c3464660b21e81f5f6ba6c0fe69bfcfaa4bb89adf654b5ed093.jpg b/data/2025/2504_10xxx/2504.10415/images/e74e4296a0117c3464660b21e81f5f6ba6c0fe69bfcfaa4bb89adf654b5ed093.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfa16d77aa685e63633c9f36516d400538d4ee57 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/e74e4296a0117c3464660b21e81f5f6ba6c0fe69bfcfaa4bb89adf654b5ed093.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f6a93a8a6f82f840313ad2cc216c93dcc0b19f84f07d9c7de584bacf0c663a4 +size 6901 diff --git a/data/2025/2504_10xxx/2504.10415/images/eaaa041fd9512203f4e34c4bec9c0a6c971fa1e475a5cc93922305ebde0dc5dc.jpg 
b/data/2025/2504_10xxx/2504.10415/images/eaaa041fd9512203f4e34c4bec9c0a6c971fa1e475a5cc93922305ebde0dc5dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..908892f2bd99824b4dfd0f0f8cb6e9d626be36b1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/eaaa041fd9512203f4e34c4bec9c0a6c971fa1e475a5cc93922305ebde0dc5dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2da9f7c7c46ee6d023f10de08b76b977c4492e45525a6660793a7376b4c0218 +size 1442 diff --git a/data/2025/2504_10xxx/2504.10415/images/f5679f3c4d121b4ea5c20faf879d882e4987e7cc96f2c511c1b25316fcd262e4.jpg b/data/2025/2504_10xxx/2504.10415/images/f5679f3c4d121b4ea5c20faf879d882e4987e7cc96f2c511c1b25316fcd262e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e61386bb2e7ee6adbd53b348a15c534273957000 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/f5679f3c4d121b4ea5c20faf879d882e4987e7cc96f2c511c1b25316fcd262e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9741d190039f8fcc797aec21ef142477869a2e317c69d66b91f0f2ef797088d0 +size 37367 diff --git a/data/2025/2504_10xxx/2504.10415/images/f5d825a3634b5f016ef48b7a8e3de61dc5544258d05e1729a47a488b41937f63.jpg b/data/2025/2504_10xxx/2504.10415/images/f5d825a3634b5f016ef48b7a8e3de61dc5544258d05e1729a47a488b41937f63.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4ad722b1ef3766ce8ca21c7548a194a2ba54201 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/f5d825a3634b5f016ef48b7a8e3de61dc5544258d05e1729a47a488b41937f63.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aad3705432b033239c9ad732ca5e65e052838df49d5c9d58faabd65dd6a59967 +size 323496 diff --git a/data/2025/2504_10xxx/2504.10415/images/f71f46fe53f6fb3b1b827b582fcd45a5f92ea2842866c6ce5dcdfaf13dedb8d8.jpg b/data/2025/2504_10xxx/2504.10415/images/f71f46fe53f6fb3b1b827b582fcd45a5f92ea2842866c6ce5dcdfaf13dedb8d8.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5bcd5332da4b4e99e05a866020c5abd4d9f8c5b0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/f71f46fe53f6fb3b1b827b582fcd45a5f92ea2842866c6ce5dcdfaf13dedb8d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5159325b3e7aaa8a108c4dd4575108b751e08df3f6f5d54e57ff8d0e54dd04bc +size 78003 diff --git a/data/2025/2504_10xxx/2504.10415/images/fb11f34bbb115c03e1386d66501627f890a455552d3792179b4cbe06566017dc.jpg b/data/2025/2504_10xxx/2504.10415/images/fb11f34bbb115c03e1386d66501627f890a455552d3792179b4cbe06566017dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d293f09592e4746b615502ee5e6f9e2eb8ebcb3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/images/fb11f34bbb115c03e1386d66501627f890a455552d3792179b4cbe06566017dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e07eec3735b2b5ca594571f2fcbf13b338e2f54fcfd58fc9451e80c01d7bfb2 +size 64597 diff --git a/data/2025/2504_10xxx/2504.10415/layout.json b/data/2025/2504_10xxx/2504.10415/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..eb9bc1f18666fb97a1ef0f547474ae7f21e485d4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10415/layout.json @@ -0,0 +1,19767 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 68, + 87, + 526, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 87, + 526, + 124 + ], + "spans": [ + { + "bbox": [ + 68, + 87, + 526, + 124 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 157, + 482, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 157, + 482, + 187 + ], + "spans": [ + { + "bbox": [ + 111, + 157, + 482, + 187 + ], + "type": "text", + "content": "Parshin Shojaee* 1 Ngoc-Hieu Nguyen* 2 Kazem Meidani 34 Amir Barati Farimani 3 Khoa D Doan 2 
Chandan K Reddy 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 108, + 190, + 485, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 190, + 485, + 201 + ], + "spans": [ + { + "bbox": [ + 108, + 190, + 485, + 201 + ], + "type": "text", + "content": "Website: https://github.com/deep-symbolic-mathematics/llm-srbench" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 148, + 223, + 196, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 223, + 196, + 235 + ], + "spans": [ + { + "bbox": [ + 148, + 223, + 196, + 235 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 240, + 272, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 240, + 272, + 624 + ], + "spans": [ + { + "bbox": [ + 72, + 240, + 272, + 624 + ], + "type": "text", + "content": "Scientific equation discovery has long been a cornerstone of scientific progress, enabling the derivation of laws governing natural phenomena. Recently, Large Language Models (LLMs) have gained interest for this task due to their potential to leverage embedded scientific knowledge for hypothesis generation. However, it is difficult to assess the true discovery capabilities of these methods because existing benchmarks often use well-known equations. This makes them vulnerable to memorization by LLMs and results in inflated performance metrics that do not reflect genuine discovery. In this paper, we introduce LLM-SRBench, a comprehensive benchmark with 239 challenging problems across four scientific domains specifically designed to evaluate LLM-based scientific equation discovery methods while preventing trivial memorization. 
Our benchmark comprises two main categories: LSR-Transform, which transforms common physical models into less common mathematical representations to test reasoning beyond memorized forms, and LSR-Synth, which introduces synthetic, discovery-driven problems requiring data-driven reasoning. Through extensive evaluation of several state-of-the-art methods, using both open and closed LLMs, we find that the best-performing system so far achieves only " + }, + { + "bbox": [ + 72, + 240, + 272, + 624 + ], + "type": "inline_equation", + "content": "31.5\\%" + }, + { + "bbox": [ + 72, + 240, + 272, + 624 + ], + "type": "text", + "content": " symbolic accuracy. These findings highlight the challenges of scientific equation discovery, positioning LLM-SRBench as a valuable resource for future research." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 314, + 224, + 533, + 346 + ], + "blocks": [ + { + "bbox": [ + 314, + 224, + 533, + 346 + ], + "lines": [ + { + "bbox": [ + 314, + 224, + 533, + 346 + ], + "spans": [ + { + "bbox": [ + 314, + 224, + 533, + 346 + ], + "type": "image", + "image_path": "29b608f1ad835a7b4b3cf0055c416cc691294f5fa75a973e1ef9650ca2acd5dc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 360, + 543, + 417 + ], + "lines": [ + { + "bbox": [ + 304, + 360, + 543, + 417 + ], + "spans": [ + { + "bbox": [ + 304, + 360, + 543, + 417 + ], + "type": "text", + "content": "Figure 1. Error analysis comparing simple LLM sampling (Llama-3.1-8B) on 100 Feynman problems versus LLM-SRBench datasets (LSR-Transform and LSR-Synth). The sharp drops in numeric error curves and considerably lower symbolic error for Feynman problems suggest memorization rather than gradual discovery." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 445, + 385, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 445, + 385, + 456 + ], + "spans": [ + { + "bbox": [ + 305, + 445, + 385, + 456 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 460, + 543, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 460, + 543, + 604 + ], + "spans": [ + { + "bbox": [ + 303, + 460, + 543, + 604 + ], + "type": "text", + "content": "Equation discovery, the process of uncovering symbolic mathematical expressions from observational data, has been a cornerstone of scientific advancement. This task, also known as symbolic regression (SR), goes beyond mere data-driven predictive modeling by seeking interpretable mathematical relations that reveal the underlying mechanisms of natural phenomena. When scientists derive mathematical equations from empirical data, they gain more than just predictive power – they obtain insights into fundamental physical principles, enable extrapolation beyond observed data, and facilitate knowledge transfer across scientific domains (Langley, 1981; Schmidt & Lipson, 2009)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 609, + 544, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 609, + 544, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 609, + 544, + 717 + ], + "type": "text", + "content": "Standard approaches to equation discovery have primarily relied on genetic programming (GP) and evolutionary algorithms (Cranmer, 2023; La Cava et al., 2021), which represent mathematical expressions as trees and navigate the vast space of possible equations through evolutionary search techniques. However, these methods face two fundamental challenges. 
First, the NP-hard nature of equation discovery (Virgolin & Pissis, 2022) makes their random mutation and crossover operations computationally prohibitive across" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 645, + 290, + 677 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 645, + 290, + 677 + ], + "spans": [ + { + "bbox": [ + 52, + 645, + 290, + 677 + ], + "type": "text", + "content": "*Equal contribution ¹Virginia Tech ²VinUniversity ³Carnegie Mellon University ⁴Capital One. Correspondence to: Parshin Shojaee ." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 685, + 290, + 717 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 290, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 290, + 717 + ], + "type": "text", + "content": "Proceedings of the " + }, + { + "bbox": [ + 52, + 685, + 290, + 717 + ], + "type": "inline_equation", + "content": "42^{nd}" + }, + { + "bbox": [ + 52, + 685, + 290, + 717 + ], + "type": "text", + "content": " International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 14, + 226, + 35, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 226, + 35, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 226, + 35, + 563 + ], + "type": "text", + "content": "arXiv:2504.10415v2 [cs.CL] 7 Jun 2025" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 90, + 67, + 101, + 80 + ], + "blocks": [ + { + "bbox": [ + 90, + 67, + 101, + 80 + ], + "lines": [ + { + "bbox": [ + 90, + 67, + 101, + 80 + ], + "spans": [ + { + "bbox": [ + 90, + 67, + 101, + 80 + ], + "type": "image", + "image_path": "94c039b1cedc695be2873c2dcb0437ad37c78b74f2f5c36f13aca7b21cf0c837.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 108, + 69, + 165, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 69, + 165, + 77 + ], + "spans": [ + { + "bbox": [ + 108, + 69, + 165, + 77 + ], + "type": "text", + "content": "Goal / Instruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 82, + 200, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 82, + 200, + 103 + ], + "spans": [ + { + "bbox": [ + 88, + 82, + 200, + 103 + ], + "type": "text", + "content": "- Discover the mathematical equation/law that describes [output variable] based on given [input features]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 88, + 103, + 203, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 103, + 203, + 131 + ], + "spans": [ + { + "bbox": [ + 88, + 103, + 203, + 131 + ], + "type": "text", + "content": "- Use domain-specific knowledge of [the scientific field] and provided data samples to find an equation that is scientifically valid and fits the data well." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 88, + 138, + 101, + 152 + ], + "blocks": [ + { + "bbox": [ + 88, + 138, + 101, + 152 + ], + "lines": [ + { + "bbox": [ + 88, + 138, + 101, + 152 + ], + "spans": [ + { + "bbox": [ + 88, + 138, + 101, + 152 + ], + "type": "image", + "image_path": "16fbc11c1ac7574b7d1a59cd72413c0b5c055e02f015f88a1e7a9732b7b601d7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 142, + 166, + 149 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 142, + 166, + 149 + ], + "spans": [ + { + "bbox": [ + 108, + 142, + 166, + 149 + ], + "type": "text", + "content": "Scientific Context" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 156, + 151, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 156, + 151, + 163 + ], + "spans": [ + { + "bbox": [ + 97, + 156, + 151, + 163 + ], + "type": "text", + "content": "Problem description" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 87, + 163, + 184, + 177 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 87, + 163, + 184, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 163, + 184, + 170 + ], + "spans": [ + { + "bbox": [ + 87, + 163, + 184, + 170 + ], + "type": "text", + "content": "Variable names and descriptions" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 87, + 170, + 123, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 87, + 170, + 123, + 177 + ], + "spans": [ + { + "bbox": [ + 87, + 170, + 123, + 177 + ], + "type": "text", + "content": "Example:" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "spans": [ + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "text", + "content": "Find an equation in the field of classical mechanics that describes the mass " + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "inline_equation", + "content": "(m)" + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "text", + "content": " needed to store energy in an oscillating system, given physical input variables: mean stored energy " + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "inline_equation", + "content": "(E_{m})" + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "text", + "content": ", driving frequency " + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "inline_equation", + "content": "(\\omega)" + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "text", + "content": ", natural frequency " + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "inline_equation", + "content": "(\\omega_{n})" + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "text", + "content": " and amplitude " + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "inline_equation", + "content": "(x)" + }, + { + "bbox": [ + 90, + 179, + 203, + 209 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 91, + 218, + 101, + 231 + ], + "blocks": [ + { + "bbox": [ + 91, + 218, + 101, + 231 + ], + "lines": [ + { + "bbox": [ + 91, + 218, + 101, + 231 + ], + "spans": [ + { + "bbox": [ + 91, + 218, + 101, + 231 + ], + "type": "image", + "image_path": "dd4ace02bfdbbc327dd22c6ca51c918e6611e6e1a0a6dffb9ff8c4f2e067743e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 296, + 542, + 340 + ], + "lines": [ + { + "bbox": [ + 51, + 296, + 542, + 340 + ], + "spans": [ + { + "bbox": [ + 51, + 296, + 542, + 340 + ], + "type": "text", + "content": "Figure 2. Overview of the LLM-based Scientific Equation Discovery. The benchmark tasks (left) combine scientific context with numerical data. The discovery process (middle) iteratively leverages LLM's scientific knowledge and data-driven reasoning to generate hypotheses for underlying equations. Discovered hypotheses, represented as equation strings, trees, or programs, are then evaluated (right) using multiple metrics including data fidelity, symbolic accuracy, and computational efficiency." + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 86, + 234, + 206, + 277 + ], + "blocks": [ + { + "bbox": [ + 112, + 220, + 129, + 228 + ], + "lines": [ + { + "bbox": [ + 112, + 220, + 129, + 228 + ], + "spans": [ + { + "bbox": [ + 112, + 220, + 129, + 228 + ], + "type": "text", + "content": "Data" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 86, + 234, + 206, + 277 + ], + "lines": [ + { + "bbox": [ + 86, + 234, + 206, + 277 + ], + "spans": [ + { + "bbox": [ + 86, + 234, + 206, + 277 + ], + "type": "table", + "html": "
\\( E_{\\mathrm {n}} \\)ω\\( \\omega_0 \\)xm
4.71.22.31.51.2
3.42.72.73.10.1
i
2.81.53.61.40.4
", + "image_path": "e0d63bd33ab92d526518753edc49274d6d59f20397bf8d0354c580b9853762fa.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 223, + 66, + 241, + 82 + ], + "blocks": [ + { + "bbox": [ + 223, + 66, + 241, + 82 + ], + "lines": [ + { + "bbox": [ + 223, + 66, + 241, + 82 + ], + "spans": [ + { + "bbox": [ + 223, + 66, + 241, + 82 + ], + "type": "image", + "image_path": "eaaa041fd9512203f4e34c4bec9c0a6c971fa1e475a5cc93922305ebde0dc5dc.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 244, + 69, + 300, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 69, + 300, + 77 + ], + "spans": [ + { + "bbox": [ + 244, + 69, + 300, + 77 + ], + "type": "text", + "content": "Typical Workflow" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 252, + 89, + 265, + 102 + ], + "blocks": [ + { + "bbox": [ + 252, + 89, + 265, + 102 + ], + "lines": [ + { + "bbox": [ + 252, + 89, + 265, + 102 + ], + "spans": [ + { + "bbox": [ + 252, + 89, + 265, + 102 + ], + "type": "image", + "image_path": "b4248be90e578b449703e07ee0bf472829ee92a629bf290e7b43622f53874a51.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 270, + 89, + 329, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 89, + 329, + 105 + ], + "spans": [ + { + "bbox": [ + 270, + 89, + 329, + 105 + ], + "type": "text", + "content": "LLM internal scientific knowledge" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 251, + 106, + 265, + 118 + ], + "blocks": [ + { + "bbox": [ + 251, + 106, + 265, + 118 + ], + "lines": [ + { + "bbox": [ + 251, + 106, + 265, + 118 + ], + "spans": [ + { + "bbox": [ + 251, + 106, + 265, + 118 + ], + "type": "image", + "image_path": 
"2943312887403cbd3614ec26d86312c9ed3797876e4b24a507d8b8ccaf08be34.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 268, + 109, + 332, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 109, + 332, + 117 + ], + "spans": [ + { + "bbox": [ + 268, + 109, + 332, + 117 + ], + "type": "text", + "content": "Reasoning and planning" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 252, + 123, + 266, + 133 + ], + "blocks": [ + { + "bbox": [ + 252, + 123, + 266, + 133 + ], + "lines": [ + { + "bbox": [ + 252, + 123, + 266, + 133 + ], + "spans": [ + { + "bbox": [ + 252, + 123, + 266, + 133 + ], + "type": "image", + "image_path": "83fdb943052af6819deb8c9f9e526ad919dcda5736b655b45a9e19dc080f02a4.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 276, + 125, + 312, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 125, + 312, + 133 + ], + "spans": [ + { + "bbox": [ + 276, + 125, + 312, + 133 + ], + "type": "text", + "content": "Programming" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 232, + 143, + 358, + 217 + ], + "blocks": [ + { + "bbox": [ + 232, + 143, + 358, + 217 + ], + "lines": [ + { + "bbox": [ + 232, + 143, + 358, + 217 + ], + "spans": [ + { + "bbox": [ + 232, + 143, + 358, + 217 + ], + "type": "image", + "image_path": "a7c6d759afe9fd77afb493d4e3d1f4cb1fd3debd494f956b9c7e6f32ff6755e4.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 253, + 221, + 268, + 237 + ], + "blocks": [ + { + "bbox": [ + 253, + 221, + 268, + 237 + ], + "lines": [ + { + "bbox": [ + 253, + 221, + 268, + 237 + ], + "spans": [ + { + "bbox": [ + 253, + 221, + 268, + 237 + ], + "type": "image", + "image_path": 
"a2d3aa7e47027c5fc32383269784b60c4e30210c9a1ddb79ea5b1ae367be0d02.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 270, + 225, + 333, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 225, + 333, + 232 + ], + "spans": [ + { + "bbox": [ + 270, + 225, + 333, + 232 + ], + "type": "text", + "content": "Parameter Optimization" + } + ] + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 252, + 241, + 266, + 254 + ], + "blocks": [ + { + "bbox": [ + 252, + 241, + 266, + 254 + ], + "lines": [ + { + "bbox": [ + 252, + 241, + 266, + 254 + ], + "spans": [ + { + "bbox": [ + 252, + 241, + 266, + 254 + ], + "type": "image", + "image_path": "bb81ea4b5f5d5a4f975ff0466f5ed65eeae3a9584feb539e69d440e2f25d1471.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "bbox": [ + 268, + 244, + 297, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 244, + 297, + 251 + ], + "spans": [ + { + "bbox": [ + 268, + 244, + 297, + 251 + ], + "type": "text", + "content": "Simulation" + } + ] + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 304, + 240, + 312, + 252 + ], + "blocks": [ + { + "bbox": [ + 304, + 240, + 312, + 252 + ], + "lines": [ + { + "bbox": [ + 304, + 240, + 312, + 252 + ], + "spans": [ + { + "bbox": [ + 304, + 240, + 312, + 252 + ], + "type": "image", + "image_path": "17beb428c63f2aabd0dfe88d4ab8d11372d076979bffbc2f467162f6659a2623.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "bbox": [ + 309, + 243, + 342, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 243, + 342, + 251 + ], + "spans": [ + { + "bbox": [ + 309, + 243, + 342, + 251 + ], + "type": "text", + "content": "Experiments" + } + ] + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 257, + 257, + 271, + 270 + 
], + "blocks": [ + { + "bbox": [ + 257, + 257, + 271, + 270 + ], + "lines": [ + { + "bbox": [ + 257, + 257, + 271, + 270 + ], + "spans": [ + { + "bbox": [ + 257, + 257, + 271, + 270 + ], + "type": "image", + "image_path": "4336f60ac23a31b7303fd2bcd302d1a07db5ad0fb5084d34cb24e039edff4412.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 273, + 262, + 328, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 262, + 328, + 268 + ], + "spans": [ + { + "bbox": [ + 273, + 262, + 328, + 268 + ], + "type": "text", + "content": "Statistical Fit to Data" + } + ] + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 390, + 67, + 403, + 81 + ], + "blocks": [ + { + "bbox": [ + 390, + 67, + 403, + 81 + ], + "lines": [ + { + "bbox": [ + 390, + 67, + 403, + 81 + ], + "spans": [ + { + "bbox": [ + 390, + 67, + 403, + 81 + ], + "type": "image", + "image_path": "b2f081c94fa3b127bfd848223e598ebab8dca7669779b99dbbb482707e9719d8.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "bbox": [ + 411, + 69, + 449, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 69, + 449, + 77 + ], + "spans": [ + { + "bbox": [ + 411, + 69, + 449, + 77 + ], + "type": "text", + "content": "Hypothesis" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 381, + 85, + 510, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 85, + 510, + 99 + ], + "spans": [ + { + "bbox": [ + 381, + 85, + 510, + 99 + ], + "type": "text", + "content": "- Discovered mathematical equation represented by expressions, trees, programs, etc." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 381, + 106, + 480, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 106, + 480, + 113 + ], + "spans": [ + { + "bbox": [ + 381, + 106, + 480, + 113 + ], + "type": "text", + "content": "- Supporting explanations / reasoning" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 389, + 121, + 439, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 121, + 439, + 127 + ], + "spans": [ + { + "bbox": [ + 389, + 121, + 439, + 127 + ], + "type": "inline_equation", + "content": "m = 4^{*}E n / (x^{**}2^{*}" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 389, + 128, + 457, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 128, + 457, + 134 + ], + "spans": [ + { + "bbox": [ + 389, + 128, + 457, + 134 + ], + "type": "text", + "content": "(omega\\*\\*2 + omega_0\\*\\*2))" + } + ] + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 462, + 114, + 511, + 152 + ], + "blocks": [ + { + "bbox": [ + 462, + 114, + 511, + 152 + ], + "lines": [ + { + "bbox": [ + 462, + 114, + 511, + 152 + ], + "spans": [ + { + "bbox": [ + 462, + 114, + 511, + 152 + ], + "type": "image", + "image_path": "e19abcc9793b69233bc2068d01085943b55800e6c5368956853e58e8c2357a1e.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "bbox": [ + 383, + 147, + 492, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 147, + 492, + 162 + ], + "spans": [ + { + "bbox": [ + 383, + 147, + 492, + 162 + ], + "type": "text", + "content": "def equation(E_n, omega, omega_0, x, params): # Energy-mass ratio normalized by parameter numerator " + }, + { + "bbox": [ + 383, + 147, + 492, + 162 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 383, + 147, + 492, + 162 + ], + "type": "text", + "content": " params[0] " + }, + { + "bbox": [ + 383, + 147, + 492, + 162 + ], 
+ "type": "inline_equation", + "content": "\\ast" + }, + { + "bbox": [ + 383, + 147, + 492, + 162 + ], + "type": "text", + "content": " E n" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "spans": [ + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "text", + "content": "Combined frequency and amplitude scaling effects denominator " + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "text", + "content": " omega++2 " + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "inline_equation", + "content": "\\ast" + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "text", + "content": " x++2 + omega_0**2 " + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "inline_equation", + "content": "\\ast" + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "text", + "content": " x**2 m " + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 392, + 163, + 509, + 183 + ], + "type": "text", + "content": " numerator / denominator return m" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 406, + 198, + 441, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 198, + 441, + 205 + ], + "spans": [ + { + "bbox": [ + 406, + 198, + 441, + 205 + ], + "type": "text", + "content": "Evaluation" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 392, + 212, + 494, + 232 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 392, + 212, + 440, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 212, + 440, + 218 + ], + "spans": [ + { + "bbox": [ + 392, + 212, + 440, + 218 + ], + "type": "text", + "content": "Data Fidelity:" + } + ] + } + ], + 
"index": 42 + }, + { + "bbox": [ + 416, + 219, + 470, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 416, + 219, + 470, + 225 + ], + "spans": [ + { + "bbox": [ + 416, + 219, + 470, + 225 + ], + "type": "text", + "content": "In-Domain accuracy" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 406, + 226, + 494, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 226, + 494, + 232 + ], + "spans": [ + { + "bbox": [ + 406, + 226, + 494, + 232 + ], + "type": "text", + "content": "- Out-of-Domain generalization" + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 392, + 237, + 459, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 237, + 459, + 244 + ], + "spans": [ + { + "bbox": [ + 392, + 237, + 459, + 244 + ], + "type": "text", + "content": "- Symbolic Accuracy:" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 406, + 245, + 496, + 265 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 406, + 245, + 496, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 245, + 496, + 251 + ], + "spans": [ + { + "bbox": [ + 406, + 245, + 496, + 251 + ], + "type": "text", + "content": "Human expert/LLM evaluator" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 406, + 251, + 470, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 251, + 470, + 257 + ], + "spans": [ + { + "bbox": [ + 406, + 251, + 470, + 257 + ], + "type": "text", + "content": "Scientific plausibility" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 406, + 258, + 455, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 258, + 455, + 265 + ], + "spans": [ + { + "bbox": [ + 406, + 258, + 455, + 265 + ], + "type": "text", + "content": "Interpretability" + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 392, + 270, + 473, + 277 + ], 
+ "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 270, + 473, + 277 + ], + "spans": [ + { + "bbox": [ + 392, + 270, + 473, + 277 + ], + "type": "text", + "content": "Computational Efficiency" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 52, + 360, + 291, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 360, + 291, + 444 + ], + "spans": [ + { + "bbox": [ + 52, + 360, + 291, + 444 + ], + "type": "text", + "content": "vast search spaces. Second, unlike human scientists who leverage their domain knowledge and expertise to guide hypothesis formation, these approaches are mostly purely data-driven, and isolated from existing scientific knowledge. These limitations have motivated researchers to develop methods that incorporate scientific domain knowledge into the equation discovery process." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 52, + 450, + 291, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 450, + 291, + 617 + ], + "spans": [ + { + "bbox": [ + 52, + 450, + 291, + 617 + ], + "type": "text", + "content": "Large Language Models (LLMs) have recently emerged as a promising solution to these challenges, offering a new paradigm for scientific equation discovery. LLMs, trained on vast corpora of scientific literature, possess extensive embedded scientific knowledge. This has sparked significant interest in leveraging LLMs for scientific equation discovery, with several recent works demonstrating their potential (Shojae et al., 2024b; Ma et al., 2024; Grayeli et al., 2024; Merler et al., 2024; Du et al., 2024; Reddy & Shojaee, 2024; Zhang et al., 2024). These LLM-based approaches have shown to enhance the equation hypothesis generation process by incorporating scientific priors, guiding the exploration of equation search spaces more efficiently, and providing interpretable reasoning for the search process." 
+ } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 52, + 623, + 291, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 623, + 291, + 707 + ], + "spans": [ + { + "bbox": [ + 52, + 623, + 291, + 707 + ], + "type": "text", + "content": "Despite the promising potential of LLM-based equation discovery methods, their rigorous and robust evaluation still remains an open challenge. The current scientific equation discovery benchmarks are primarily represented by SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022). SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 303, + 360, + 543, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 360, + 543, + 468 + ], + "spans": [ + { + "bbox": [ + 303, + 360, + 543, + 468 + ], + "type": "text", + "content": "& Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for data points. However, these benchmarks exhibit significant limitations for the evaluation of LLM-based methods. Their problems are mostly based on known physics equations from textbooks, which makes them often subject to memorization by LLMs." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 303, + 474, + 544, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 474, + 544, + 713 + ], + "spans": [ + { + "bbox": [ + 303, + 474, + 544, + 713 + ], + "type": "text", + "content": "As noted by (Shojaaee et al., 2024b), LLMs frequently succeed on these common equation discovery benchmarks through simple recitation based on variable names and problem descriptions, rather than the actual process of data-driven discovery and reasoning. Our analysis (shown in Fig. 
1) also confirms this finding - the sudden drop in the numeric error curve within the first few iterations and significantly lower symbolic error on Feynman problems indicate memorized solutions rather than a meaningful search towards discovery. To mitigate this issue, (Shojaaee et al., 2024b; Ma et al., 2024) have introduced a handful of five custom-crafted problems designed to prevent memorization by manually modifying known physical models. While these efforts represent a step forward, the small scale and limited diversity of these problem sets are insufficient to provide a comprehensive evaluation framework for emerging LLM-based methods in scientific equation discovery. A more robust and systematic benchmark is needed to enable standardized evaluation and foster the development of innovative methods in this emerging field." + } + ] + } + ], + "index": 57 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 58 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 291, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 291, + 450 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 291, + 450 + ], + "type": "text", + "content": "In this paper, we introduce LLM-SRBench, a new benchmark designed to rigorously evaluate the capabilities of LLM-based scientific equation 
discovery methods. LLM-SRBench addresses the limitations of existing benchmarks by constructing problem sets that avoid trivial recitation while leveraging the scientific priors embedded in LLMs, simulating conditions akin to scientific discovery. The benchmark is structured around two main categories of problems, each targeting distinct aspects of equation discovery. The first category focuses on transforming common scientific problems, such as those from the Feynman equations, into different mathematical representations of the same underlying physical problem. By symbolically altering input-output mappings and generating less common mathematical forms for the same problem, we challenge LLM-based equation discovery to go beyond memorization of the common forms. This approach is motivated by recent findings on the fragility of LLMs' reasoning capabilities to unfamiliar representations of otherwise familiar problems (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). The second category extends the approach introduced by (Shojae et al., 2024b), which combines known terms in the underlying equation with synthetic, novel terms to create problems that go beyond memorization and demand data-driven reasoning. We expand this idea into a comprehensive set of benchmark problems spanning diverse scientific domains. These problems incorporate carefully designed synthetic terms that are both novel and plausible. We further verify the solvability of the generated equations using numerical solvers, ensuring that the benchmark problems remain grounded in physical feasibility while presenting meaningful challenges for LLM-based discovery methods." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 456, + 292, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 456, + 292, + 647 + ], + "spans": [ + { + "bbox": [ + 52, + 456, + 292, + 647 + ], + "type": "text", + "content": "LLM-SRBench comprises 111 problems in the first category (LSR-Transform), and 128 problems in the second category (LSR-Synth), spanning four scientific domains: chemistry (36), biology (24), physics (43), and material science (25). We comprehensively benchmark state-of-the-art LLM-based scientific equation discovery methods with several LLM backbones on these datasets. Our experiments reveal several key insights into the capabilities and limitations of current LLM-based scientific equation discovery methods. Results show that the best model can only solve " + }, + { + "bbox": [ + 52, + 456, + 292, + 647 + ], + "type": "inline_equation", + "content": "31.5\\%" + }, + { + "bbox": [ + 52, + 456, + 292, + 647 + ], + "type": "text", + "content": " of problems on LSR-Transform and " + }, + { + "bbox": [ + 52, + 456, + 292, + 647 + ], + "type": "inline_equation", + "content": "28.1\\%" + }, + { + "bbox": [ + 52, + 456, + 292, + 647 + ], + "type": "text", + "content": " on LSR-Synth. This underscores the challenging nature of the tasks in LLM-SRBench and highlights its potential as a critical evaluation foundation for future LLM-based scientific equation discovery methods. Overall, the contributions of this work are as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 669, + 292, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 669, + 292, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 669, + 292, + 718 + ], + "type": "text", + "content": "- We introduce LLM-SRBench, the first comprehensive benchmark with 239 challenging problems across various scientific domains, designed to evaluate LLM-based scientific equation discovery methods." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 67, + 542, + 169 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 304, + 67, + 542, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 128 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 128 + ], + "type": "text", + "content": "- We propose a novel benchmark design through alternative mathematical representations (LSR-Transform) and synthetic, discovery-driven problems (LSR-Synth) to ensure rigorous evaluation of scientific reasoning and discovery capabilities beyond LLM memorization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 133, + 542, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 133, + 542, + 169 + ], + "spans": [ + { + "bbox": [ + 304, + 133, + 542, + 169 + ], + "type": "text", + "content": "- Extensive experiments on state-of-the-art methods reveal performance peaks at " + }, + { + "bbox": [ + 304, + 133, + 542, + 169 + ], + "type": "inline_equation", + "content": "31\\%" + }, + { + "bbox": [ + 304, + 133, + 542, + 169 + ], + "type": "text", + "content": ", highlighting the benchmark's challenging nature and its potential for future research." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 186, + 399, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 186, + 399, + 198 + ], + "spans": [ + { + "bbox": [ + 304, + 186, + 399, + 198 + ], + "type": "text", + "content": "2. LLM-SRBench" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "spans": [ + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "text", + "content": "We introduce LLM-SRBench, a novel benchmark designed to evaluate LLM-based methods for data-driven scientific equation discovery. As shown in Fig. 
2, in this benchmark, a \"data-driven scientific equation discovery\" task is defined as follows: Given a task dataset " + }, + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "text", + "content": ", the corresponding scientific context " + }, + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "text", + "content": ", the objective is to derive a hypothesis " + }, + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 303, + 206, + 543, + 350 + ], + "type": "text", + "content": " that represents the underlying mathematical relations behind the data with high precision and scientific plausibility. This process resembles the iterative search and refinement undertaken by human scientists, where LLMs act as optimizers, proposing and refining hypotheses based on both scientific knowledge and empirical data." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 358, + 394, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 394, + 369 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 394, + 369 + ], + "type": "text", + "content": "2.1. LSR-Transform" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 376, + 544, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 376, + 544, + 509 + ], + "spans": [ + { + "bbox": [ + 303, + 376, + 544, + 509 + ], + "type": "text", + "content": "This category is designed to evaluate whether LLM-based methods can discover equations in less common mathematical forms, avoiding reliance on memorization of well-known representations. 
This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024; Xie et al., 2024; Wu et al., 2023). By transforming existing benchmark problems into different mathematical representations, we challenge LLMs' capabilities in data-driven scientific equation discovery and reasoning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 514, + 544, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 514, + 544, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 514, + 544, + 717 + ], + "type": "text", + "content": "We build on the Feynman (Udrescu & Tegmark, 2020) benchmark (current standard benchmark in scientific equation discovery), which consists of 100 physics equations, and systematically transform these equations into alternative mathematical forms (examples in App. A.1). As demonstrated in Fig. 3(a), the transformation process involves seven key steps: 1) Equation Collection: We gather the original mathematical expressions, along with their input and output variables, and scientific problem descriptions from the Feynman benchmark. 2) Select Pivot Variable: For each equation, we choose an input feature to become the new target variable. 3) Feature-Target Transformation: We transform the dataset by switching the roles of the selected input feature and the original target variable. 
4) Symbolic Transformation: Using the SymPy library in Python on the parsed expressions, we solve each equation with respect to the selected input variable, treating it" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 67, + 293, + 281 + ], + "blocks": [ + { + "bbox": [ + 77, + 67, + 293, + 281 + ], + "lines": [ + { + "bbox": [ + 77, + 67, + 293, + 281 + ], + "spans": [ + { + "bbox": [ + 77, + 67, + 293, + 281 + ], + "type": "image", + "image_path": "f71f46fe53f6fb3b1b827b582fcd45a5f92ea2842866c6ce5dcdfaf13dedb8d8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 283, + 221, + 293 + ], + "lines": [ + { + "bbox": [ + 157, + 283, + 221, + 293 + ], + "spans": [ + { + "bbox": [ + 157, + 283, + 221, + 293 + ], + "type": "text", + "content": "(a) LSR-Transform" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 298, + 66, + 516, + 280 + ], + "blocks": [ + { + "bbox": [ + 298, + 66, + 516, + 280 + ], + "lines": [ + { + "bbox": [ + 298, + 66, + 516, + 280 + ], + "spans": [ + { + "bbox": [ + 298, + 66, + 516, + 280 + ], + "type": "image", + "image_path": 
"4c620bd69fe9f710128e1e41b30a46b9f571f0d7ea8754a51132638fc907b0c9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 282, + 435, + 293 + ], + "lines": [ + { + "bbox": [ + 386, + 282, + 435, + 293 + ], + "spans": [ + { + "bbox": [ + 386, + 282, + 435, + 293 + ], + "type": "text", + "content": "(b) LSR-Synth" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 304, + 543, + 350 + ], + "lines": [ + { + "bbox": [ + 51, + 304, + 543, + 350 + ], + "spans": [ + { + "bbox": [ + 51, + 304, + 543, + 350 + ], + "type": "text", + "content": "Figure 3. Data generation pipelines for the two dataset categories in LLM-SRBench. (a) LSR-Transform converts Feynman problems into alternative mathematical forms through symbolic transformation and input-output role switching, and (b) LSR-Synth generates novel discovery-driven problems by combining known scientific terms in the underlying models with synthetic novel terms. Both pipelines include validation steps to ensure solvability and scientific plausibility." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 364, + 291, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 364, + 291, + 616 + ], + "spans": [ + { + "bbox": [ + 52, + 364, + 291, + 616 + ], + "type": "text", + "content": "as the new output and the original output variable as an input in the transformed equation. 5) Solvability Check: We retain only those transformations that are analytically solvable, ensuring the feasibility of the resulting equations. 6) Dataset Refinement: For the transformed equations with altered data domains (e.g., due to square roots or denominators), we filter the original Feynman dataset to ensure all data points fall within the valid domains of the new equations. 
7) Problem Reformulation: Using LLM (GPT4o), we generate a new natural language specification for each transformed problem. During this data generation process, we constrain the transformed equations' complexity (measured by expression tree node count) to the range of original Feynman dataset distribution (full analysis in Fig. 8, App.A.1). This allows us to focus on the semantic aspects of discovery—specifically the interplay between reasoning and memorization of the mathematical forms—rather than conflating performance with the ability to handle syntactically complex and lengthy hypotheses. We also exclude transformed problems that LLM can solve through direct sampling without requiring access to data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 620, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 620, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 620, + 291, + 717 + ], + "type": "text", + "content": "This process yields 111 total transformed equations derived from the 100 original Feynman problems. Each transformed equation shares the same scientific context, problem description, and variables as its original counterpart but presents a less common mathematical form to be discovered. The goal of LSR-Transform is not to discover new equations but to evaluate whether LLM-based systems can validate discoveries from non-trivial, data-driven transformations of known" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 364, + 544, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 364, + 544, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 364, + 544, + 460 + ], + "type": "text", + "content": "equations. To support scientific knowledge-guided discovery, each task in LSR-Transform is supplemented with a natural language description of the scientific problem and dataset, including variable names and their meanings. 
These descriptions are absent in the original Feynman benchmark but they are needed for LLM-based scientific equation discovery methods to provide scientific context in prompts for knowledge-guided equation discovery by LLMs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 472, + 373, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 373, + 484 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 373, + 484 + ], + "type": "text", + "content": "2.2. LSR-Synth" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 491, + 544, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 491, + 544, + 695 + ], + "spans": [ + { + "bbox": [ + 303, + 491, + 544, + 695 + ], + "type": "text", + "content": "This category is designed to assess whether LLMs can discover equations that incorporate new synthetic terms alongside known terms, requiring scientific as well as data-driven reasoning rather than reliance on memorization. The LSR-Synth dataset is motivated by the approach introduced in (Shojae et al., 2024b) for the handful of manually designed problems and systematically expands it into a comprehensive set of benchmark problems across diverse scientific domains. By combining known terms with synthetic, novel terms, LLMs are challenged to demonstrate discovery capabilities in unobserved contexts, yet leverage their knowledge in the process. The LSR-Synth dataset spans four scientific domains: chemistry, biology, physics, and material science, focusing on key scientific problems, including reaction kinetics in chemistry, population growth in biology, damped harmonic oscillators in physics, and stress-strain relationships in material science (examples in App. A.2)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 700, + 544, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 700, + 544, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 700, + 544, + 713 + ], + "type": "text", + "content": "The data generation process for LSR-Synth involves multi" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": "ple steps , as illustrated in Fig. 3(b), to ensure the creation of high-quality, challenging benchmark problems: 1) Select Scientific Problem: We select problems from different scientific domains, such as reaction kinetics in chemistry or population dynamics in biology. 2) Known Term Generation: Given the problem description, we prompt an LLM (GPT-4o) to generate a list of common and well-known mathematical terms that typically appear in the underlying models. 
3) Synthetic Term Generation: Similarly, we prompt the LLM to generate a list of diverse novel synthetic terms for a given scientific problem, along with descriptions of the problem and variables. For example, in chemistry reaction kinetics, known terms for reaction rate " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "(dA / dt)" + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": " based on concentration " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "(A)" + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": " and time " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "(t)" + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": " might include first-order " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "(-kA)" + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": " and second-order kinetics " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "(-kA^2)" + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": " or the exponential decay term " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "-k\\exp (-k_st)" + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": ", while synthetic terms could represent non-linear high-order saturation, e.g., " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "kA^2 /(1 + \\beta A^4)" + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": ", or non-linear quantum tunneling effects, e.g., " + }, + { + "bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "inline_equation", + "content": "kA\\exp (-\\frac{\\gamma}{t}) / t^2" + }, + { + 
"bbox": [ + 52, + 68, + 291, + 583 + ], + "type": "text", + "content": ". 4) Solvability Check: After sampling from the generated known and synthetic terms and combining them into a complete mathematical expression, we verify the solvability of these expressions using numerical solvers such as solve_ivp in Python. This step ensures that the expressions are feasible, providing a basis for generating datapoints. 5) Novelty Check: In the context of each scientific problem and the complete expression, we evaluate the novelty of the new generated task using LLM (GPT-4o) as a novelty evaluator. This step is to verify that the synthetic terms are novel in the provided context and require data-driven reasoning rather than relying on established knowledge to be discovered. 6) Databe point Generation: For expressions that pass the solvability and novelty checks, we generate datapoints using numerical solvers based on the specified initial conditions and parameters. These datapoints are used to create the final task datasets. 7) Expert Validation: Finally, the filtered expressions, along with visualizations of their generated datapoints, are cross-checked by two subject matter experts to validate their plausibility. After these filtering steps, we finalize a candidate list of 128 problems across the four domains (36: chemistry; 24: biology; 43: physics; and 25: material science). More detailed analysis of LLM-SRBench datasets are provided in App. A." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 594, + 119, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 594, + 119, + 605 + ], + "spans": [ + { + "bbox": [ + 53, + 594, + 119, + 605 + ], + "type": "text", + "content": "2.3. 
Evaluation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 613, + 291, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 613, + 291, + 708 + ], + "spans": [ + { + "bbox": [ + 52, + 613, + 291, + 708 + ], + "type": "text", + "content": "Evaluating LLM-based scientific equation discovery methods introduces unique challenges due to the open-ended nature of the task and diverse symbolic representation of hypotheses. A discovered equation can be assessed from two perspectives: (a) data fidelity, which measures how well the equation fits the observed and out-of-domain (OOD) data, and (b) symbolic accuracy, which evaluates the alignment with ground-truth symbolic equation hypotheses. Both" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 67, + 542, + 91 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 91 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 91 + ], + "type": "text", + "content": "perspectives are critical, as equations may exhibit similar symbolic forms but differ numerically, or vice versa." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 95, + 542, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 95, + 542, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 95, + 542, + 180 + ], + "type": "text", + "content": "Data Fidelity. We evaluate data-driven fidelity using two known metrics in equation discovery: (1) Accuracy to tolerance " + }, + { + "bbox": [ + 304, + 95, + 542, + 180 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 95, + 542, + 180 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 95, + 542, + 180 + ], + "type": "inline_equation", + "content": "\\mathrm{Acc}_{\\tau}" + }, + { + "bbox": [ + 304, + 95, + 542, + 180 + ], + "type": "text", + "content": ") (Kamienny et al., 2022; Biggio et al., 2021), and Normalized Mean Squared Error (NMSE). 
These metrics are computed on both in-domain test data and OOD data (when available) to assess generalization capacity, a crucial requirement for scientific equations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 343, + 191, + 503, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 191, + 503, + 217 + ], + "spans": [ + { + "bbox": [ + 343, + 191, + 503, + 217 + ], + "type": "interline_equation", + "content": "\\operatorname {A c c} _ {\\tau} = \\mathbb {1} \\left(\\max _ {1 \\leq i \\leq N _ {\\text {t e s t}}} \\left| \\frac {\\hat {y} _ {i} - y _ {i}}{y _ {i}} \\right| \\leq \\tau\\right),", + "image_path": "45adbee8894555e3106f5a2dee37d6404abf1849e69eefb9c0dccc433661da59.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 386, + 220, + 503, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 220, + 503, + 251 + ], + "spans": [ + { + "bbox": [ + 386, + 220, + 503, + 251 + ], + "type": "interline_equation", + "content": "\\mathrm {N M S E} = \\frac {\\sum_ {i = 1} ^ {N _ {\\mathrm {t e s t}}} (\\hat {y} _ {i} - y _ {i}) ^ {2}}{\\sum_ {i = 1} ^ {N _ {\\mathrm {t e s t}}} (y _ {i} - \\bar {y}) ^ {2}}", + "image_path": "567618e539e7aaa3e227f4a99d09b32c252bb82a09a58ea4265eb74d2ca07490.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 256, + 543, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 256, + 543, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 256, + 543, + 506 + ], + "type": "text", + "content": "Symbolic Accuracy. We evaluate symbolic accuracy with a model-based evaluation strategy using GPT-4o as an evaluator (prompt in App. B, Fig. 11). 
This approach addresses the limitations of current symbolic metrics like recovery rate in symbolic regression (La Cava et al., 2016), which are very sensitive to exact symbolic matches and fail to account for mathematical equivalence, particularly in different hypothesis representations (e.g., equation as strings, expression trees, or Python programs). Here, GPT-4o evaluates mathematical equivalence by comparing the symbolic form of the predicted hypothesis versus the ground-truth equation after removing parameters and constants. The ability of LLMs to recognize semantic equivalence across different representations makes them particularly well-suited for evaluating LLM-based equation discovery methods, which often operate within a more diverse and open-ended hypothesis space. To validate this metric, two authors also independently evaluated symbolic equivalence on 130 sampled problems, finding " + }, + { + "bbox": [ + 304, + 256, + 543, + 506 + ], + "type": "inline_equation", + "content": "94.6\\%" + }, + { + "bbox": [ + 304, + 256, + 543, + 506 + ], + "type": "text", + "content": " agreement between GPT-4o and human evaluators. App. B provides more details on the evaluation metrics." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 522, + 385, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 385, + 536 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 385, + 536 + ], + "type": "text", + "content": "3. Experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 543, + 410, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 410, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 410, + 555 + ], + "type": "text", + "content": "3.1. 
Experimental Setup" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 562, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 562, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 562, + 542, + 717 + ], + "type": "text", + "content": "We benchmark state-of-the-art LLM-based scientific equation discovery methods using three LLM backbones: one open-source model (Llama-3.1-8B-Instruct) and two proprietary models (GPT-4o-mini and GPT-3.5-turbo). Each discovery task takes as input the problem description, variables, the corresponding dataset, and an instruction specifying the task. The discovery methods then generate and refine equation hypotheses through LLMs. To ensure fair comparison, we standardize each of the methods to use 1k LLM calls per problem while maintaining their core algorithmic designs and hyperparameter settings. Detailed implementation specifics and prompts of each method are provided in App. C. We" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 55, + 114, + 542, + 340 + ], + "blocks": [ + { + "bbox": [ + 52, + 74, + 543, + 108 + ], + "lines": [ + { + "bbox": [ + 52, + 74, + 543, + 108 + ], + "spans": [ + { + "bbox": [ + 52, + 74, + 543, + 
108 + ], + "type": "text", + "content": "Table 1. Comparison of different LLM-based scientific equation discovery methods on LLM-SRBench. Performance metrics include symbolic accuracy (SA), numeric precision " + }, + { + "bbox": [ + 52, + 74, + 543, + 108 + ], + "type": "inline_equation", + "content": "(\\mathrm{Acc}_{0.1})" + }, + { + "bbox": [ + 52, + 74, + 543, + 108 + ], + "type": "text", + "content": ", and normalized mean squared error (NMSE). Bold values indicate best performance within each method, and underlined values show best overall performance across discovery methods." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 114, + 542, + 340 + ], + "lines": [ + { + "bbox": [ + 55, + 114, + 542, + 340 + ], + "spans": [ + { + "bbox": [ + 55, + 114, + 542, + 340 + ], + "type": "table", + "html": "
ModelsLSR-TransformLSR-Synth
ChemistryBiologyPhysicsMaterial Science
SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓SA (%)↑Acc0.1(%)↑NMSE↓
Direct Prompting (DataBlind)
Llama-3.1-8B-Instruct3.611.8010.36970.00.00.06440.00.00.54810.00.00.04590.00.00.0826
GPT-3.5-turbo2.101.8010.35530.08.330.00230.04.160.59900.02.270.02740.00.00.0277
GPT-4o-mini7.216.3060.26310.013.880.02210.04.160.46484.549.090.06470.00.00.0484
SGA (Ma et al., 2024)
Llama-3.1-8B-Instruct2.700.9090.35190.08.330.04580.00.00.24160.02.270.15490.012.120.0435
GPT-3.5-turbo0.00.9090.34650.08.330.00710.08.330.12792.274.540.02490.028.100.0019
GPT-4o-mini9.918.110.23210.016.665.46e-44.1612.510.01284.549.090.05110.036.116.02e-4
LaSR (Grayeli et al., 2024)
Llama-3.1-8B-Instruct5.4145.940.00210.027.772.77e-44.1616.662.73e-44.5425.020.00188.2164.227.44e-5
GPT-3.5-turbo12.6147.740.00150.038.891.51e-40.016.662.31e-46.8122.710.001120.6664.093.77e-5
GPT-4o-mini6.3150.450.00112.7738.929.11e-58.3320.831.53e-49.9131.819.94e-428.1272.049.23e-6
LLM-SR (Shojaece et al., 2024b)
Llama-3.1-8B-Instruct30.6338.550.01018.3366.668.01e-625.3058.331.04e-66.9734.091.23e-44.1088.121.15e-7
GPT-3.5-turbo10.8110.810.14490.050.222.87e-50.025.032.33e-50.025.128.84e-412.4282.142.75e-8
GPT-4o-mini31.5339.640.009111.1152.774.12e-616.6629.163.06e-69.9136.367.62e-520.2488.283.21e-9
", + "image_path": "9f909e31eff9f4ce76eed70ce90bc2bb14486252d0d4d100e27f4576d9575cab.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 361, + 224, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 361, + 224, + 373 + ], + "spans": [ + { + "bbox": [ + 52, + 361, + 224, + 373 + ], + "type": "text", + "content": "evaluate the following discovery methods:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 378, + 290, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 378, + 290, + 438 + ], + "spans": [ + { + "bbox": [ + 52, + 378, + 290, + 438 + ], + "type": "text", + "content": "LLM-SR (Shojace et al., 2024b), a program search equation discovery method that generates hypotheses of equation skeleton as Python functions with the main idea of combining LLMs' scientific knowledge with multi-island evolutionary search guided by feedback from data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 443, + 291, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 443, + 291, + 515 + ], + "spans": [ + { + "bbox": [ + 52, + 443, + 291, + 515 + ], + "type": "text", + "content": "LaSR (Grayeli et al., 2024), a concept learning equation discovery method that finds abstract textual concepts of mathematical relations from successful equation hypotheses with LLMs and uses these concepts to evolve new hypotheses through a hybrid approach of evolutionary search (with PySR (Cranmer, 2023)) and LLM-guided search." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 520, + 291, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 520, + 291, + 579 + ], + "spans": [ + { + "bbox": [ + 52, + 520, + 291, + 579 + ], + "type": "text", + "content": "SGA (Ma et al., 2024), a bilevel optimization equation discovery method that iteratively combines LLMs for discrete hypothesis generation of scientific laws and physical simulations in PyTorch for continuous parameter optimization with respect to data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 584, + 291, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 584, + 291, + 645 + ], + "spans": [ + { + "bbox": [ + 52, + 584, + 291, + 645 + ], + "type": "text", + "content": "Direct Prompting (DataBlind) serves as a baseline for generating hypotheses purely from contextual information without access to data. By not using data-driven reasoning and refinement in the hypothesis generation, this baseline helps to assess LLMs' memorization of the problem." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 658, + 130, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 658, + 130, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 658, + 130, + 669 + ], + "type": "text", + "content": "3.2. Main Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 676, + 291, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 676, + 291, + 712 + ], + "spans": [ + { + "bbox": [ + 52, + 676, + 291, + 712 + ], + "type": "text", + "content": "Our experimental results (Table 1) reveals several key insights into the strengths and limitations of LLM-based scientific equation discovery methods. 
Overall, performance" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 306, + 361, + 421, + 477 + ], + "blocks": [ + { + "bbox": [ + 306, + 361, + 421, + 477 + ], + "lines": [ + { + "bbox": [ + 306, + 361, + 421, + 477 + ], + "spans": [ + { + "bbox": [ + 306, + 361, + 421, + 477 + ], + "type": "image", + "image_path": "e6d31a599f2e56fc3e8bcf784ee3f05f71964409e21e6a4b52c86a09aa0d91e6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 490, + 542, + 545 + ], + "lines": [ + { + "bbox": [ + 304, + 490, + 542, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 490, + 542, + 545 + ], + "type": "text", + "content": "Figure 4. Performance comparison across equation complexity levels for Feynman and LSR-Transform datasets: (a) symbolic accuracy and (b) numeric precision " + }, + { + "bbox": [ + 304, + 490, + 542, + 545 + ], + "type": "inline_equation", + "content": "(\\mathrm{Acc}_{0.1})" + }, + { + "bbox": [ + 304, + 490, + 542, + 545 + ], + "type": "text", + "content": " showing considerable performance gap between these two datasets at same complexity levels (averaged over all method-LLM pairs)." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 425, + 361, + 542, + 477 + ], + "blocks": [ + { + "bbox": [ + 425, + 361, + 542, + 477 + ], + "lines": [ + { + "bbox": [ + 425, + 361, + 542, + 477 + ], + "spans": [ + { + "bbox": [ + 425, + 361, + 542, + 477 + ], + "type": "image", + "image_path": "396f55c66c8f2289a25537401b85bf1309aa6f76d7a756db809edb8483380734.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 574, + 544, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 574, + 544, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 574, + 544, + 717 + ], + "type": "text", + "content": "remains relatively low across both symbolic and numeric metrics, underscoring the fundamental challenges of this task. One key observation is the poor performance of direct prompting method (DataBlind), which only relies on LLMs' knowledge about the problem without access to data for data-driven refinement. This result underscores the necessity of combining LLM reasoning with observational data, as relying solely on prior knowledge proves insufficient for accurate equation discovery across different problems in LLM-SRBench. 
We observe that on LSR-Transform data group, LaSR achieves the highest numerical accuracy, leading in both " + }, + { + "bbox": [ + 303, + 574, + 544, + 717 + ], + "type": "inline_equation", + "content": "\\mathrm{Acc}_{0.1}" + }, + { + "bbox": [ + 303, + 574, + 544, + 717 + ], + "type": "text", + "content": " and NMSE, while LLM-SR with GPT" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 68, + 531, + 216 + ], + "blocks": [ + { + "bbox": [ + 67, + 68, + 531, + 216 + ], + "lines": [ + { + "bbox": [ + 67, + 68, + 531, + 216 + ], + "spans": [ + { + "bbox": [ + 67, + 68, + 531, + 216 + ], + "type": "image", + "image_path": "894c6bbe436e91e344a48f1b4afa6678514260a01ee7aec8ebbf71e5ed22ed03.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 229, + 543, + 253 + ], + "lines": [ + { + "bbox": [ + 51, + 229, + 543, + 253 + ], + "spans": [ + { + "bbox": [ + 51, + 229, + 543, + 253 + ], + "type": "text", + "content": "Figure 5. Detailed results of in-domain (ID) and out-of-domain (OOD) performance using Normalized Mean Squared Error across various LSR-Synth scientific domains and LLM-based equation discovery methods (with GPT-4o-mini as LLM backbone)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 271, + 291, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 271, + 291, + 355 + ], + "spans": [ + { + "bbox": [ + 51, + 271, + 291, + 355 + ], + "type": "text", + "content": "4o-mini outperforms other methods in symbolic accuracy " + }, + { + "bbox": [ + 51, + 271, + 291, + 355 + ], + "type": "inline_equation", + "content": "(\\sim 31\\%)" + }, + { + "bbox": [ + 51, + 271, + 291, + 355 + ], + "type": "text", + "content": ". This comparative advantage inverts in the LSR-Synth material science problems, where LaSR consistently yields better symbolic accuracy and LLM-SR achieves better numerical precision, suggesting that different equation discovery strategies may be better suited to different problems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 361, + 292, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 361, + 292, + 504 + ], + "spans": [ + { + "bbox": [ + 51, + 361, + 292, + 504 + ], + "type": "text", + "content": "Another notable observation is the consistent outperformance of models using GPT-4o-mini and Llama-3.1-8B compared to those based on GPT-3.5-turbo. This may be due to improved reasoning architectures or better effectiveness of smaller, less opinionated models in the search and exploration needed for navigating space of possible equations. The lower performance on LSR-Synth compared to LSR-Transform tasks also indicates that the ability to find transformed variants of known problems does not necessarily extend to more challenging scenarios involving novel synthetic terms, where systematic data-driven exploration becomes essential." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 513, + 111, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 513, + 111, + 525 + ], + "spans": [ + { + "bbox": [ + 52, + 513, + 111, + 525 + ], + "type": "text", + "content": "3.3. Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 531, + 292, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 531, + 292, + 712 + ], + "spans": [ + { + "bbox": [ + 51, + 531, + 292, + 712 + ], + "type": "text", + "content": "LSR-Transform vs. Feynman datasets. We analyze the performance gap between Feynman and LSR-Transform datasets across different equation complexity levels, measured by the number of nodes in the corresponding expression tree (La Cava et al., 2021). Fig. 4 shows the aggregated average performance (over all methods and LLM backbones) in terms of both symbolic accuracy (a) and numeric precision (b). It can be observed that even at the same complexity levels, LSR-Transform problems are substantially more challenging for current discovery methods than original Feynman problems. Also, this performance disparity persists even for simpler problems ([0-15] nodes), indicating that the challenging nature of LSR-Transform problems for LLM-based scientific equation discovery methods is not necessarily due to the structural complexity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 271, + 544, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 271, + 544, + 606 + ], + "spans": [ + { + "bbox": [ + 303, + 271, + 544, + 606 + ], + "type": "text", + "content": "Performance on In-domain vs. OOD. Generalization to unseen data is a fundamental requirement for scientific laws and a critical aspect of equation discovery. A correct mathematical model of observations should not only fit observed data but also extrapolate accurately to out-of-domain (OOD) scenarios. 
However, current equation discovery benchmarks largely overlook this aspect. In this work, we advocate for explicit OOD assessment in scientific equation discovery by introducing held-out OOD test sets in our benchmark. To systematically evaluate generalization beyond observed data, we generate dedicated OOD test sets for synthetic problems in the LSR-Synth category (see App. A for details on data generation). Fig. 5 provides a comparative analysis of ID vs. OOD results. As expected, all discovery methods exhibit higher NMSE in OOD settings, indicating degraded generalization compared to in-domain data. Among the evaluated methods, LLM-SR achieves the lowest NMSE across both ID and OOD settings, while direct prompting performs the worst. Also, we observe some domain-specific variations in generalization performance: the performance gap between ID and OOD is more pronounced in chemistry and biology problems compared to physics and material science, although the complexity of problems are designed to be similar, as shown in Fig. 10. This suggests that different scientific problems may pose distinct challenges for equation discovery methods, highlighting the need for future research to develop more robust approaches for different scientific disciplines." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 612, + 544, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 612, + 544, + 708 + ], + "spans": [ + { + "bbox": [ + 303, + 612, + 544, + 708 + ], + "type": "text", + "content": "OD generalization and symbolic accuracy. We further analyzed the correlation between our proposed symbolic accuracy metric (Sec. 2.3) and data-driven extrapolation performance in OOD settings (averaged over all LSR-Synth domains). As shown in Fig. 
6, symbolic accuracy exhibits a strong positive correlation with numerical precision " + }, + { + "bbox": [ + 303, + 612, + 544, + 708 + ], + "type": "inline_equation", + "content": "(\\mathrm{Acc}_{0.1})" + }, + { + "bbox": [ + 303, + 612, + 544, + 708 + ], + "type": "text", + "content": " on OOD data and a corresponding negative correlation with numerical error (NMSE). This strong correlation observed" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 65, + 179, + 177 + ], + "blocks": [ + { + "bbox": [ + 53, + 65, + 179, + 177 + ], + "lines": [ + { + "bbox": [ + 53, + 65, + 179, + 177 + ], + "spans": [ + { + "bbox": [ + 53, + 65, + 179, + 177 + ], + "type": "image", + "image_path": "ca65ea72e3598758183c47b1c9383295ef13fd6f3425058eab5ecd3109f15d85.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 190, + 291, + 257 + ], + "lines": [ + { + "bbox": [ + 51, + 190, + 291, + 257 + ], + "spans": [ + { + "bbox": [ + 51, + 190, + 291, + 257 + ], + "type": "text", + "content": "Figure 6. Correlation between symbolic accuracy and OOD performance across different equation discovery methods and LLM backbones: (a) symbolic accuracy vs. 
" + }, + { + "bbox": [ + 51, + 190, + 291, + 257 + ], + "type": "inline_equation", + "content": "\\mathrm{Acc}_{0.1}" + }, + { + "bbox": [ + 51, + 190, + 291, + 257 + ], + "type": "text", + "content": " showing positive correlation; (b) symbolic accuracy vs. normalized mean squared error showing negative correlation. Results are averaged over all LSR-Synth datasets." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 181, + 65, + 288, + 176 + ], + "blocks": [ + { + "bbox": [ + 181, + 65, + 288, + 176 + ], + "lines": [ + { + "bbox": [ + 181, + 65, + 288, + 176 + ], + "spans": [ + { + "bbox": [ + 181, + 65, + 288, + 176 + ], + "type": "image", + "image_path": "829ccf7d250536ce02ae636e646d148b83b71780115b434595a8856f534f2c26.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 283, + 290, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 283, + 290, + 368 + ], + "spans": [ + { + "bbox": [ + 51, + 283, + 290, + 368 + ], + "type": "text", + "content": "between symbolic and OOD performance provides two key insights: First, it establishes OOD evaluation as a powerful approach for assessing the discovery of generalizable equations—an aspect often underutilized in symbolic regression research; second, it validates our LLM-based symbolic evaluation approach through its strong alignment with numeric generalization performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 373, + 291, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 373, + 291, + 421 + ], + "spans": [ + { + "bbox": [ + 51, + 373, + 291, + 421 + ], + "type": "text", + "content": "More detailed experimental results, including both qualitative analyses of discovered equations and quantitative performance comparisons across scientific equation discovery methods and LLMs, are provided in App. D." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 431, + 139, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 431, + 139, + 443 + ], + "spans": [ + { + "bbox": [ + 52, + 431, + 139, + 443 + ], + "type": "text", + "content": "4. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 448, + 290, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 448, + 290, + 604 + ], + "spans": [ + { + "bbox": [ + 51, + 448, + 290, + 604 + ], + "type": "text", + "content": "AI for Scientific Discovery. Recent advancements in AI for science highlight the ability of LLMs to generate scientific hypotheses by leveraging their extensive knowledge and reasoning capabilities (Lu et al., 2024; Ji et al., 2024; Reddy & Shojaee, 2024). LLM agents, when augmented with external tools and scientific simulators, have shown promise in automated scientific data-driven analysis (Majumder et al., 2024a). While recent benchmarks have been developed to evaluate LLMs and agents in hypothesis generation and scientific question answering (Majumder et al., 2024b; Chen et al., 2024), evaluation for equation discovery and symbolic regression—one of the core tasks in scientific discovery—remains yet unexplored." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 609, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 609, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 51, + 609, + 291, + 717 + ], + "type": "text", + "content": "Symbolic Regression. Symbolic regression approaches fall into three main categories: search-based methods that explore equation spaces via evolutionary algorithms or reinforcement learning (Schmidt & Lipson, 2009; Cranmer, 2023; Petersen et al., 2021; Sun et al., 2023), learning-based methods leveraging pre-trained Transformers on synthetic data (Biggio et al., 2021; Kamienny et al., 2022), and hybrid approaches that guide search using neural priors (Landajuela et al., 2022; Shojaee et al., 2024a; Mundhenk et al., 2021;" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 67, + 542, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 67, + 542, + 117 + ], + "spans": [ + { + "bbox": [ + 303, + 67, + 542, + 117 + ], + "type": "text", + "content": "Meidani et al., 2023). While these methods have advanced the field of automated symbolic function discovery from data, they mostly lack mechanisms to incorporate scientific domain knowledge into the discovery process." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 121, + 543, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 121, + 543, + 433 + ], + "spans": [ + { + "bbox": [ + 303, + 121, + 543, + 433 + ], + "type": "text", + "content": "LLMs for Equation Discovery. Recent work has leveraged LLM-based symbolic regression to enhance scientific equation discovery through various approaches leveraging LLMs' knowledge. 
LLM-SR (Shojaee et al., 2024b) utilizes LLMs' embedded scientific knowledge to generate initial equation hypotheses in the form of Python programming functions, which are then refined through adaptive mutation and crossover operations with LLMs as evolutionary optimizers. In-Context Symbolic Regression (ICSR) (Merler et al., 2024) employs an iterative few-shot learning paradigm over expression candidates, using previously tested successful expressions along with their fitness scores to guide the generation of improved candidates. LaSR (Grayeli et al., 2024) alternates between hypothesis evolution, concept abstraction, and concept iteration phases to build a learned library of scientific concepts for mathematical relations needed to find the equation for a given data. The learned concepts are then used with pure evolutionary search methods (Cranmer, 2023) like PySR (Cranmer, 2023) as well as LLM-guided search to guide the equation hypothesis evolution. Scientific Generative Agent (SGA) (Ma et al., 2024) also implements a bilevel optimization framework for equation discovery where LLMs iteratively propose discrete hypotheses for scientific laws while physical simulations in PyTorch provide experimental validation and data-driven parameter optimization." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 437, + 544, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 437, + 544, + 714 + ], + "spans": [ + { + "bbox": [ + 303, + 437, + 544, + 714 + ], + "type": "text", + "content": "Symbolic Regression Benchmarks. Symbolic regression benchmarks can be broadly categorized into scientific discovery-oriented and general-purpose mathematical discovery collections. The scientific equation discovery benchmarks are primarily represented by the SRBench (La Cava et al., 2021) and SRSD (Matsubara et al., 2022) benchmarks. 
SRBench incorporates two key data groups for this purpose: the Feynman physics equations (Udrescu & Tegmark, 2020), and Strogatz dynamical systems (La Cava et al., 2016; Strogatz, 2018). A notable extension to this framework is presented in SRSD (Matsubara et al., 2022), which enhances the Feynman benchmark by incorporating physically meaningful sampling ranges for datapoints. The second category includes benchmarks like the Nguyen collection (Uy et al., 2011) and SRBench's black-box regression problems (La Cava et al., 2016) which include datasets without scientific contexts. However, these existing benchmarks are not well-suited for evaluating LLM-based equation discovery methods. These general-purpose benchmarks focus on the data-driven discovery of abstract mathematical functions without scientific context, while the former scientific benchmarks consist of well-known equations likely memorized by LLMs, enabling success through recitation rather than" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 290, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 290, + 128 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 290, + 128 + ], + "type": "text", + "content": "scientific 
reasoning and discovery. Our work extends this line of research by focusing on scientific equation discovery with LLMs, designing the first comprehensive benchmark to assess discovery capabilities of LLM-based scientific equation discovery methods beyond memorization." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 142, + 126, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 142, + 126, + 155 + ], + "spans": [ + { + "bbox": [ + 52, + 142, + 126, + 155 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 163, + 291, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 163, + 291, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 163, + 291, + 390 + ], + "type": "text", + "content": "We introduce LLM-SRBench, the first comprehensive benchmark for LLM-driven scientific equation discovery, encompassing 239 tasks across two distinct categories: LSR-Transform (111 problems derived from transformations of established physical models) and LSR-Synth (128 novel synthetic problems spanning four scientific disciplines). Our benchmark provides a standardized and multi-faceted evaluation protocol for assessing scientific equation discovery with LLMs, accommodating diverse hypothesis representations, including expression strings and programs. Extensive experiments with state-of-the-art discovery methods and various LLM backbones on LLM-SRBench show a peak performance of only " + }, + { + "bbox": [ + 53, + 163, + 291, + 390 + ], + "type": "inline_equation", + "content": "31\\%" + }, + { + "bbox": [ + 53, + 163, + 291, + 390 + ], + "type": "text", + "content": ", highlighting the significant challenges and open research opportunities in this domain. 
We envision that LLM-SRBench benchmark datasets and its evaluation protocol could serve as a foundation for future research, driving progress in automated equation discovery and advancing our understanding of LLMs in symbolic reasoning needed in scientific discovery." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 406, + 147, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 406, + 147, + 419 + ], + "spans": [ + { + "bbox": [ + 53, + 406, + 147, + 419 + ], + "type": "text", + "content": "Impact Statement" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 426, + 291, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 426, + 291, + 498 + ], + "spans": [ + { + "bbox": [ + 52, + 426, + 291, + 498 + ], + "type": "text", + "content": "The development and future adoption of LLM-SRBench as a benchmark for evaluating LLM-based scientific equation discovery has the potential to significantly impact the field of artificial intelligence for science and scientific discovery. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 511, + 134, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 511, + 134, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 511, + 134, + 523 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 529, + 290, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 529, + 290, + 554 + ], + "spans": [ + { + "bbox": [ + 52, + 529, + 290, + 554 + ], + "type": "text", + "content": "This research was partially supported by the U.S. National Science Foundation (NSF) under Grant No. 2416728." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 569, + 111, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 569, + 111, + 582 + ], + "spans": [ + { + "bbox": [ + 53, + 569, + 111, + 582 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 588, + 291, + 718 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 588, + 291, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 588, + 291, + 659 + ], + "spans": [ + { + "bbox": [ + 53, + 588, + 291, + 659 + ], + "type": "text", + "content": "Biggio, L., Bendinelli, T., Neitz, A., Lucchi, A., and Paras-candolo, G. Neural symbolic regression that scales. In Meila, M. and Zhang, T. (eds.), Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pp. 936-945. PMLR, 18-24 Jul 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 681, + 291, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 681, + 291, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 681, + 291, + 718 + ], + "type": "text", + "content": "Chen, Z., Chen, S., Ning, Y., Zhang, Q., Wang, B., Yu, B., Li, Y., Liao, Z., Wei, C., Lu, Z., et al. Scienceagentbench: Toward rigorous assessment of language" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 542, + 716 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 315, + 67, + 542, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 67, + 542, + 91 + ], + "spans": [ + { + "bbox": [ + 315, + 67, + 542, + 91 + ], + "type": "text", + "content": "agents for data-driven scientific discovery. arXiv preprint arXiv:2410.05080, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 101, + 542, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 101, + 542, + 136 + ], + "spans": [ + { + "bbox": [ + 306, + 101, + 542, + 136 + ], + "type": "text", + "content": "Cranmer, M. Interpretable machine learning for science with pysr and symbolicregression. jl. arXiv preprint arXiv:2305.01582, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 147, + 542, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 147, + 542, + 182 + ], + "spans": [ + { + "bbox": [ + 306, + 147, + 542, + 182 + ], + "type": "text", + "content": "Du, M., Chen, Y., Wang, Z., Nie, L., and Zhang, D. Large language models for automatic equation discovery of nonlinear dynamics. Physics of Fluids, 36(9), 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 193, + 542, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 193, + 542, + 228 + ], + "spans": [ + { + "bbox": [ + 306, + 193, + 542, + 228 + ], + "type": "text", + "content": "Grayeli, A., Sehgal, A., Costilla-Reyes, O., Cranmer, M., and Chaudhuri, S. Symbolic regression with a learned concept library. arXiv preprint arXiv:2409.09359, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 238, + 542, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 238, + 542, + 310 + ], + "spans": [ + { + "bbox": [ + 306, + 238, + 542, + 310 + ], + "type": "text", + "content": "Ji, H., Wang, Q., Downey, D., and Hope, T. Scimon: Scientific inspiration machines optimized for novelty. In ACL Anthology: Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 279-299. University of Illinois Urbana-Champaign/CABBI, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 320, + 542, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 320, + 542, + 367 + ], + "spans": [ + { + "bbox": [ + 306, + 320, + 542, + 367 + ], + "type": "text", + "content": "Kamienny, P.-A., d'Ascoli, S., Lample, G., and Charton, F. End-to-end symbolic regression with transformers. In Advances in Neural Information Processing Systems, 2022." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 378, + 542, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 378, + 542, + 461 + ], + "spans": [ + { + "bbox": [ + 306, + 378, + 542, + 461 + ], + "type": "text", + "content": "La Cava, W., Danai, K., and Spector, L. Inference of compact nonlinear dynamic models by epigenetic local search. Engineering Applications of Artificial Intelligence, 55:292-306, 2016. ISSN 0952-1976. doi: https://doi.org/10.1016/j.engappai.2016.07.004. URL https://www.sciencedirect.com/science/article/pii/S0952197616301294." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 472, + 542, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 472, + 542, + 543 + ], + "spans": [ + { + "bbox": [ + 306, + 472, + 542, + 543 + ], + "type": "text", + "content": "La Cava, W., Orzechowski, P., Burlacu, B., de Franca, F., Virgolin, M., Jin, Y., Kommenda, M., and Moore, J. Contemporary symbolic regression methods and their relative performance. In Vanschoren, J. and Yeung, S. (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 553, + 542, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 553, + 542, + 624 + ], + "spans": [ + { + "bbox": [ + 306, + 553, + 542, + 624 + ], + "type": "text", + "content": "Landajuela, M., Lee, C., Yang, J., Glatt, R., Santiago, C. 
P., Aravena, I., Mundhenk, T. N., Mulcahy, G., and Petersen, B. K. A unified framework for deep symbolic regression. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K. (eds.), Advances in Neural Information Processing Systems, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 635, + 542, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 542, + 659 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 542, + 659 + ], + "type": "text", + "content": "Langley, P. Data-driven discovery of physical laws. Cognitive Science, 5(1):31-54, 1981." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 670, + 542, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 542, + 716 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 542, + 716 + ], + "type": "text", + "content": "Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J., and Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292, 2024." 
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 717 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 293, + 140 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 293, + 140 + ], + "type": "text", + "content": "Ma, P., Wang, T.-H., Guo, M., Sun, Z., Tenenbaum, J. B., Rus, D., Gan, C., and Matusik, W. LLM and simulation as bilevel optimizers: A new paradigm to advance physical scientific discovery. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=hz8cFsdz7P." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 293, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 293, + 195 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 293, + 195 + ], + "type": "text", + "content": "Majumder, B. P., Surana, H., Agarwal, D., Hazra, S., Sabharwal, A., and Clark, P. Data-driven discovery with large generative models. arXiv preprint arXiv:2402.13610, 2024a." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 204, + 292, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 204, + 292, + 263 + ], + "spans": [ + { + "bbox": [ + 53, + 204, + 292, + 263 + ], + "type": "text", + "content": "Majumder, B. P., Surana, H., Agarwal, D., Mishra, B. D., Meena, A., Prakhar, A., Vora, T., Khot, T., Sabharwal, A., and Clark, P. Discoverybench: Towards data-driven discovery with large language models. arXiv preprint arXiv:2407.01725, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 272, + 292, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 272, + 292, + 319 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 292, + 319 + ], + "type": "text", + "content": "Matsubara, Y., Chiba, N., Igarashi, R., Tatsunori, T., and Ushiku, Y. Rethinking symbolic regression datasets and benchmarks for scientific discovery. arXiv preprint arXiv:2206.10540, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 328, + 292, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 328, + 292, + 388 + ], + "spans": [ + { + "bbox": [ + 53, + 328, + 292, + 388 + ], + "type": "text", + "content": "Meidani, K., Shojaee, P., Reddy, C. K., and Farimani, A. B. Snip: Bridging mathematical symbolic and numeric realms with unified pre-training. In The Twelfth International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 396, + 292, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 396, + 292, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 396, + 292, + 468 + ], + "type": "text", + "content": "Merler, M., Haitsiukevich, K., Dainese, N., and Marttinen, P. In-context symbolic regression: Leveraging large language models for function discovery. 
In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop), pp. 589-606, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 476, + 292, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 476, + 292, + 525 + ], + "spans": [ + { + "bbox": [ + 53, + 476, + 292, + 525 + ], + "type": "text", + "content": "Mirzadeh, I., Alizadeh, K., Shahrokhi, H., Tuzel, O., Bengio, S., and Farajtabar, M. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 533, + 292, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 533, + 292, + 605 + ], + "spans": [ + { + "bbox": [ + 53, + 533, + 292, + 605 + ], + "type": "text", + "content": "Mundhenk, T. N., Landajuela, M., Glatt, R., Santiago, C. P., faissol, D., and Petersen, B. K. Symbolic regression via deep reinforcement learning enhanced genetic programming seeding. In Beygelzimer, A., Dauphin, Y., Liang, P., and Vaughan, J. W. (eds.), Advances in Neural Information Processing Systems, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 613, + 292, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 613, + 292, + 673 + ], + "spans": [ + { + "bbox": [ + 53, + 613, + 292, + 673 + ], + "type": "text", + "content": "Petersen, B. K., Larma, M. L., Mundhenk, T. N., Santiago, C. P., Kim, S. K., and Kim, J. T. Deep symbolic regression: Recovering mathematical expressions from data via risk-seeking policy gradients. In International Conference on Learning Representations, 2021." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 681, + 292, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 681, + 292, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 681, + 292, + 717 + ], + "type": "text", + "content": "Reddy, C. K. and Shojaee, P. Towards scientific discovery with generative ai: Progress, opportunities, and challenges. arXiv preprint arXiv:2412.11427, 2024." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 542, + 673 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 306, + 67, + 542, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 542, + 114 + ], + "type": "text", + "content": "Schmidt, M. and Lipson, H. Distilling free-form natural laws from experimental data. Science Advance, 324 (5923):81-85, 2009. ISSN 0036-8075. doi: 10.1126/science.1165893." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 123, + 542, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 123, + 542, + 170 + ], + "spans": [ + { + "bbox": [ + 306, + 123, + 542, + 170 + ], + "type": "text", + "content": "Shojae, P., Meidani, K., Barati Farimani, A., and Reddy, C. Transformer-based planning for symbolic regression. Advances in Neural Information Processing Systems, 36, 2024a." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 178, + 542, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 178, + 542, + 226 + ], + "spans": [ + { + "bbox": [ + 306, + 178, + 542, + 226 + ], + "type": "text", + "content": "Shojaee, P., Meidani, K., Gupta, S., Farimani, A. B., and Reddy, C. K. Llm-sr: Scientific equation discovery via programming with large language models. arXiv preprint arXiv:2404.18400, 2024b." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 235, + 542, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 235, + 542, + 270 + ], + "spans": [ + { + "bbox": [ + 306, + 235, + 542, + 270 + ], + "type": "text", + "content": "Strogatz, S. H. Nonlinear dynamics and chaos with student solutions manual: With applications to physics, biology, chemistry, and engineering. CRC press, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 278, + 542, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 278, + 542, + 327 + ], + "spans": [ + { + "bbox": [ + 306, + 278, + 542, + 327 + ], + "type": "text", + "content": "Sun, F., Liu, Y., Wang, J.-X., and Sun, H. Symbolic physics learner: Discovering governing equations via monte carlo tree search. In The Eleventh International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 334, + 542, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 334, + 542, + 382 + ], + "spans": [ + { + "bbox": [ + 306, + 334, + 542, + 382 + ], + "type": "text", + "content": "Udrescu, S.-M. and Tegmark, M. Ai feynman: A physics-inspired method for symbolic regression. Science Advances, 6(16):eaay2631, 2020. doi: 10.1126/sciadv.aay2631." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 390, + 542, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 390, + 542, + 449 + ], + "spans": [ + { + "bbox": [ + 306, + 390, + 542, + 449 + ], + "type": "text", + "content": "Uy, N. Q., Hoai, N. X., O'Neill, M., McKay, R. I., and Galván-López, E. Semantically-based crossover in genetic programming: application to real-valued symbolic regression. Genetic Programming and Evolvable Machines, 12:91-119, 2011." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 457, + 542, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 457, + 542, + 493 + ], + "spans": [ + { + "bbox": [ + 306, + 457, + 542, + 493 + ], + "type": "text", + "content": "Virgolin, M. and Pissis, S. P. Symbolic regression is NP-hard. Transactions on Machine Learning Research, 2022. ISSN 2835-8856." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 501, + 542, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 501, + 542, + 561 + ], + "spans": [ + { + "bbox": [ + 306, + 501, + 542, + 561 + ], + "type": "text", + "content": "Wu, Z., Qiu, L., Ross, A., Akyurek, E., Chen, B., Wang, B., Kim, N., Andreas, J., and Kim, Y. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. arXiv preprint arXiv:2307.02477, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 569, + 542, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 569, + 542, + 617 + ], + "spans": [ + { + "bbox": [ + 306, + 569, + 542, + 617 + ], + "type": "text", + "content": "Xie, C., Huang, Y., Zhang, C., Yu, D., Chen, X., Lin, B. Y., Li, B., Ghazi, B., and Kumar, R. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 625, + 542, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 625, + 542, + 673 + ], + "spans": [ + { + "bbox": [ + 306, + 625, + 542, + 673 + ], + "type": "text", + "content": "Zhang, Y., Zheng, K., Liu, F., Zhang, Q., and Wang, Z. Autoturb: Using large language models for automatic algebraic model discovery of turbulence closure. arXiv preprint arXiv:2410.10657, 2024." 
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 66, + 105, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 66, + 105, + 79 + ], + "spans": [ + { + "bbox": [ + 53, + 66, + 105, + 79 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 87, + 147, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 87, + 147, + 99 + ], + "spans": [ + { + "bbox": [ + 53, + 87, + 147, + 99 + ], + "type": "text", + "content": "A. Dataset Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 107, + 144, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 107, + 144, + 119 + ], + "spans": [ + { + "bbox": [ + 53, + 107, + 144, + 119 + ], + "type": "text", + "content": "A.1. 
LSR-Transform" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 126, + 544, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 126, + 544, + 331 + ], + "spans": [ + { + "bbox": [ + 52, + 126, + 544, + 331 + ], + "type": "text", + "content": "The LSR-Transform is the first category of datasets in LLM-SRBench, designed to evaluate the ability of LLM-based scientific equation discovery methods in less common mathematical forms. This dataset challenges LLM-based discovery methods to avoid reliance on memorization of well-known representations and instead reason through unfamiliar instantiations of familiar problems. This approach is motivated by the observation that LLMs often struggle with unfamiliar instantiations of otherwise familiar problems, as highlighted by recent studies on the fragility of LLM reasoning (Mirzadeh et al., 2024). By transforming existing benchmark problems into alternative mathematical representations, LSR-Transform provides a rigorous testbed to evaluate how well LLM-based discovery methods perform in both (1) semantic scientific reasoning, which draws on LLMs' built-in scientific knowledge, and (2) data-driven reasoning, which utilizes experimental feedback for equation discovery. LSR-Transform builds on the Feynman benchmark (Udrescu & Tegmark, 2020), a widely used standard benchmark in scientific equation discovery and symbolic regression. The Feynman benchmark consists of 100 physics equations from Feynman Lecture Series" + }, + { + "bbox": [ + 52, + 126, + 544, + 331 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 52, + 126, + 544, + 331 + ], + "type": "text", + "content": ", representing fundamental laws in physics. While the Feynman benchmark has been instrumental in evaluating symbolic regression methods, it primarily tests the ability to recover equations in their standard, well-known forms which are mostly memorized by LLMs. 
However, real-world scientific equation discovery often involves reasoning about unknown equations based on domain expertise and knowledge from literature as well as empirical data observations. To address this gap, LSR-Transform transforms the original Feynman equations into less common alternative mathematical forms of the same physical problem by switching input-output variables and symbolically solving for the new target variables." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 106, + 341, + 487, + 597 + ], + "blocks": [ + { + "bbox": [ + 106, + 341, + 487, + 597 + ], + "lines": [ + { + "bbox": [ + 106, + 341, + 487, + 597 + ], + "spans": [ + { + "bbox": [ + 106, + 341, + 487, + 597 + ], + "type": "image", + "image_path": "8ba728d78f2919727c08b3690d02bb7aec14be00772e7730233d78780cba6800.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 610, + 536, + 623 + ], + "lines": [ + { + "bbox": [ + 52, + 610, + 536, + 623 + ], + "spans": [ + { + "bbox": [ + 52, + 610, + 536, + 623 + ], + "type": "text", + "content": "Figure 7. Examples of how LLM-SRBench (LSR-Transform) problems can be obtained from original Feynman benchmark problems." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "text", + "content": "Figure 7 demonstrates the equation transformation process, showing examples of the original Feynman problems (along with their scientific descriptions) and their potential transformed versions. These examples show the dataset's design for altering the mathematical representation of the same problem by analytically solving the equations with respect to different input variables. 
For instance, the original harmonic oscillator energy equation " + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "inline_equation", + "content": "E = \\frac{1}{4} m(\\omega^2 + \\omega_0^2)x^2" + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "text", + "content": " is transformed into symbolic representation of " + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "inline_equation", + "content": "m = \\frac{4E}{(\\omega^2 + \\omega_0^2)x^2}" + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "inline_equation", + "content": "\\omega = \\sqrt{\\frac{4E}{mx^2} - \\omega_0^2}" + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "text", + "content": " where the target variable is switched from energy " + }, + { + "bbox": [ + 52, + 631, + 544, + 700 + ], + "type": "inline_equation", + "content": "(E)" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 64, + 706, + 335, + 718 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 706, + 335, + 718 + ], + "spans": [ + { + "bbox": [ + 64, + 706, + 335, + 718 + ], + "type": "text", + "content": " float: \"\"Evaluate the equation on data observations.\"\"\"" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": 
[ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 85, + 492, + 350 + ], + "blocks": [ + { + "bbox": [ + 142, + 74, + 453, + 85 + ], + "lines": [ + { + "bbox": [ + 142, + 74, + 453, + 85 + ], + "spans": [ + { + "bbox": [ + 142, + 74, + 453, + 85 + ], + "type": "text", + "content": "Table 2. Implementation details of LLM-based scientific equation discovery methods." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 85, + 492, + 350 + ], + "lines": [ + { + "bbox": [ + 106, + 85, + 492, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 85, + 492, + 350 + ], + "type": "table", + "html": "
MethodParameters
Direct Prompting (DataBlind)Temperature τ = 0.85 equation program hypotheses sampled from LLM for initial promptNo access to data for data-driven refinementTime limit T = 30s per program hypothesis execution,BFGS optimizer from Scipy for parameter optimization of equation skeletons
SGA (Ma et al., 2024)PyTorch-based implementation of model and torch nn. Module classMean square error loss for data-driven feedback in agentic searchAdam optimizer in PyTorch for differential parameter optimization of equation skeletons
LaSR (Grayeli et al., 2024)Iterations = 25Cycles per iteration = 550Populations = 10Population size = 33Maximum size = 30Operators: +, *, -, /, ∧, exp, log, sqrt, sin, cos, tan, coshLLM weights: llm_mutate =0.005, llm_crossover =0.005, llm_gen_random =0.005Top-K = 20 concepts from libraryDefault configuration of PySR for parameter optimization
LLM-SR (Shojaee et al., 2024b)Temperature τ = 0.8Batch size b = 4 equation programs per prompte = 4 parallel evaluatorsTime limit T = 30s per program hypothesis,Memory limit M = 2GBm = 10 islands for population diversity through searchk = 2 in-context examples per promptMaximum 10 parameters per equation skeletonBFGS optimizer from Scipy for parameter optimization of equation skeletons
", + "image_path": "76b6590aa51f2645dd5819d5bfe3ca1e5f13e79490781dd4e4338503b9cb123c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 88, + 372, + 323, + 518 + ], + "blocks": [ + { + "bbox": [ + 88, + 372, + 323, + 518 + ], + "lines": [ + { + "bbox": [ + 88, + 372, + 323, + 518 + ], + "spans": [ + { + "bbox": [ + 88, + 372, + 323, + 518 + ], + "type": "text", + "content": "# Load data observations \ninputs, outputs = data['inputs'], data['outputs'] \nX = inputs \n# Optimize parameters based on data \nfrom scipy.optimize import minimize \ndef loss.params): \n y_pred = equation(*X, params) \n return np.mean((y_pred - outputs) ** 2) \nloss_partial = lambda params: loss.params) \nresult = minimize(loss_partial, [1.0]*MAX_NPARAMS, method='BFGS') \n# Return evaluation score \noptimized.params = result.x \nloss = result(fun \nif np.isnan(loss) or np.isinf(loss): \n return None \nelse: \n return -loss" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 53, + 532, + 328, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 532, + 328, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 532, + 328, + 544 + ], + "type": "text", + "content": "3. Equation example specification as Python programming function." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 77, + 555, + 499, + 716 + ], + "blocks": [ + { + "bbox": [ + 77, + 555, + 499, + 716 + ], + "lines": [ + { + "bbox": [ + 77, + 555, + 499, + 716 + ], + "spans": [ + { + "bbox": [ + 77, + 555, + 499, + 716 + ], + "type": "text", + "content": "def equation_v0(\\(INPUT VAR[0], ..., \\)INPUT VAR[N], params):\n ''' Mathematical function for {$OUTPUT VAR_DESC}\nArgs:\n $INPUT VAR[0]: A numpy array representing observations of {$INPUT VAR_DESC[0]}.\n ...\n $INPUT VAR[N]: A numpy array representing observations of {$INPUT VAR_DESC[N]}.\nparams: Array of numeric constants or parameters to be optimized\nReturn: A numpy array representing {$OUTPUT VAR_DESC} as the result of applying the mathematical function to the inputs.\n'''# Equation example 1 logic as function body\n...\ndef equation_v1(\\)INPUT VAR[0], ..., \\)INPUT VAR[N], params):\n # Equation example 2\n...\n## Function to be completed" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 57 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 77, + 70, + 298, + 86 + ], + "blocks": [ + { + "bbox": [ + 77, + 70, + 298, + 86 + ], + "lines": [ + { + 
"bbox": [ + 77, + 70, + 298, + 86 + ], + "spans": [ + { + "bbox": [ + 77, + 70, + 298, + 86 + ], + "type": "text", + "content": "def equation(" + }, + { + "bbox": [ + 77, + 70, + 298, + 86 + ], + "type": "inline_equation", + "content": "INPUT VAR[0], ...," + }, + { + "bbox": [ + 77, + 70, + 298, + 86 + ], + "type": "text", + "content": "INPUT VAR[N], params):\n ''' Improvement version of equation_v0 and equation_v1'''" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 52, + 104, + 111, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 104, + 111, + 114 + ], + "spans": [ + { + "bbox": [ + 52, + 104, + 111, + 114 + ], + "type": "text", + "content": "C.2.2. LASR" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 123, + 542, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 123, + 542, + 147 + ], + "spans": [ + { + "bbox": [ + 51, + 123, + 542, + 147 + ], + "type": "text", + "content": "We use the default prompts from LaSR's (Grayeli et al., 2024) public code repository (https://github.com/trishullah/LibraryAugmentedSymbolicRegression.jl), which includes:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 162, + 542, + 289 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 60, + 162, + 413, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 162, + 413, + 174 + ], + "spans": [ + { + "bbox": [ + 60, + 162, + 413, + 174 + ], + "type": "text", + "content": "1. The LLMINIT prompt, which is used in an LLM-augmented initialization operation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 182, + 411, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 182, + 411, + 194 + ], + "spans": [ + { + "bbox": [ + 60, + 182, + 411, + 194 + ], + "type": "text", + "content": "2. 
LLMMUTATION prompt is used to mutate an expression based on a set of concepts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 201, + 542, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 201, + 542, + 226 + ], + "spans": [ + { + "bbox": [ + 59, + 201, + 542, + 226 + ], + "type": "text", + "content": "3. LLMCROSSOVER prompt is used to construct a new expression from the crossover of two sampled expressions based on a set of concepts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 59, + 234, + 541, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 234, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 59, + 234, + 541, + 258 + ], + "type": "text", + "content": "4. LLM Concept Abstraction prompt in CONCEPTABSTRACTION function, which extracts a natural language concept from current trends of hypotheses at each iteration." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 266, + 542, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 266, + 542, + 289 + ], + "spans": [ + { + "bbox": [ + 59, + 266, + 542, + 289 + ], + "type": "text", + "content": "5. LLM Concept Evolution prompt in CONCEPTEVOLUTION function, which creates a new concept that follows a set of ideas in the current library." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 304, + 279, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 304, + 279, + 318 + ], + "spans": [ + { + "bbox": [ + 52, + 304, + 279, + 318 + ], + "type": "text", + "content": "In the following, we provide examples of these prompts." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 323, + 140, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 323, + 140, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 323, + 140, + 335 + ], + "type": "text", + "content": "1. LLMINIT prompt." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "code", + "bbox": [ + 77, + 339, + 507, + 471 + ], + "blocks": [ + { + "bbox": [ + 77, + 339, + 507, + 471 + ], + "lines": [ + { + "bbox": [ + 77, + 339, + 507, + 471 + ], + "spans": [ + { + "bbox": [ + 77, + 339, + 507, + 471 + ], + "type": "text", + "content": " \nYou are a helpful assistant that proposes a mathematical expression by following three provided suggestions. An expression must consist of the following variables: {{variables}}. All constants will be represented with the symbol C. Each expression will only use these operators: {{operators}}. \n \nSuggestion 1: {{assume1}} \nSuggestion 2: {{assume2}} \nSuggestion 3: {{assume3}} \nPropose {{N}} expressions that would be appropriate given the suggestions. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... \"expr{N}\"] \n]" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "handlebars" + }, + { + "bbox": [ + 52, + 486, + 168, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 486, + 168, + 498 + ], + "spans": [ + { + "bbox": [ + 52, + 486, + 168, + 498 + ], + "type": "text", + "content": "2. LLMMUTATION prompt." + } + ] + } + ], + "index": 13 + }, + { + "type": "code", + "bbox": [ + 77, + 504, + 507, + 649 + ], + "blocks": [ + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "lines": [ + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "spans": [ + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "type": "text", + "content": " \nYou are a helpful assistant that mutates a mathematical expression by following a few provided suggestions. You will be given three suggestions and a single reference expression to mutate. 
\nAn expression must consist of the following variables: " + }, + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "type": "inline_equation", + "content": "\\{\\{variables\\}\\}" + }, + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "type": "text", + "content": " . All constants will be represented with the symbol C. Each expression will only use these operators: " + }, + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "type": "inline_equation", + "content": "\\{\\{\\mathrm{operators}\\}\\}" + }, + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "type": "text", + "content": " \n \nSuggestion 1:{\\{assume1\\}} \nSuggestion 2:{\\{assume2\\}} \nSuggestion 3:{\\{assume3\\}} \nReference Expression:{\\{expr\\}} \nPropose " + }, + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "type": "inline_equation", + "content": "\\{\\{\\mathbf{N}\\}\\}" + }, + { + "bbox": [ + 77, + 504, + 507, + 649 + ], + "type": "text", + "content": " expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... \"expr.{N}\"] \n]" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "code_body" + } + ], + "index": 14, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 53, + 664, + 174, + 676 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 664, + 174, + 676 + ], + "spans": [ + { + "bbox": [ + 53, + 664, + 174, + 676 + ], + "type": "text", + "content": "3. LLMCROSSOVER prompt." 
+ } + ] + } + ], + "index": 15 + }, + { + "type": "code", + "bbox": [ + 77, + 681, + 507, + 716 + ], + "blocks": [ + { + "bbox": [ + 77, + 681, + 507, + 716 + ], + "lines": [ + { + "bbox": [ + 77, + 681, + 507, + 716 + ], + "spans": [ + { + "bbox": [ + 77, + 681, + 507, + 716 + ], + "type": "text", + "content": " \nYou are a helpful assistant that recombines two mathematical expressions by following a few provided suggestions. You will be given three suggestions and two reference expressions to recombine. \nAn expression must consist of the following variables: " + }, + { + "bbox": [ + 77, + 681, + 507, + 716 + ], + "type": "inline_equation", + "content": "\\{\\{variables\\}\\}" + }, + { + "bbox": [ + 77, + 681, + 507, + 716 + ], + "type": "text", + "content": " . All constants will be represented with the symbol C. Each expression will only use these operators: " + }, + { + "bbox": [ + 77, + 681, + 507, + 716 + ], + "type": "inline_equation", + "content": "\\{\\{\\mathrm{operators}\\}\\}" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "code_body" + } + ], + "index": 16, + "sub_type": "code", + "guess_lang": "latex" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 77, + 76, + 504, + 189 + ], + "blocks": [ + { + "bbox": [ + 77, + 
76, + 504, + 189 + ], + "lines": [ + { + "bbox": [ + 77, + 76, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 77, + 76, + 504, + 189 + ], + "type": "text", + "content": " \nSuggestion 1: {{assume1}} \nSuggestion 2: {{assume2}} \nSuggestion 3: {{assume3}} \nReference Expression 1: {{expr1}} \nReference Expression 2: {{expr2}} \nPropose {{N}} expressions that would be appropriate given the suggestions and references. Provide short commentary for each of your decisions. End with a JSON list that enumerates the proposed expressions following this format: \n``'json \n[\"expr1\", \"expr2\", ... \"expr.{N}\" \n]" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "handlebars" + }, + { + "bbox": [ + 52, + 204, + 205, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 204, + 205, + 217 + ], + "spans": [ + { + "bbox": [ + 52, + 204, + 205, + 217 + ], + "type": "text", + "content": "4. LLM Concept Abstraction prompt." + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 77, + 222, + 507, + 259 + ], + "blocks": [ + { + "bbox": [ + 77, + 222, + 507, + 259 + ], + "lines": [ + { + "bbox": [ + 77, + 222, + 507, + 259 + ], + "spans": [ + { + "bbox": [ + 77, + 222, + 507, + 259 + ], + "type": "text", + "content": " \nYou are a helpful assistant that hypothesizes about the underlying assumptions that generated a list of good and bad mathematical expressions in detailed ways. My ultimate goal is to discover what assumptions generated the observed good mathematical expressions and excludes the bad mathematical expressions. Focus more on the good expressions, their mathematical structure, and any relation to physical concepts. 
Note that capital C represents an arbitrary constant" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 77, + 264, + 501, + 430 + ], + "blocks": [ + { + "bbox": [ + 77, + 264, + 501, + 430 + ], + "lines": [ + { + "bbox": [ + 77, + 264, + 501, + 430 + ], + "spans": [ + { + "bbox": [ + 77, + 264, + 501, + 430 + ], + "type": "text", + "content": " \nGood Expression 1: {gexpr1} \nGood Expression 2: {gexpr2} \nGood Expression 3: {gexpr3} \nGood Expression 4: {gexpr4} \nGood Expression 5: {gexpr5} \nBad Expression 1: {bexpr1} \nBad Expression 2: {bexpr2} \nBad Expression 3: {bexpr3} \nBad Expression 4: {bexpr4} \nBad Expression 5: {bexpr5} \nPropose " + }, + { + "bbox": [ + 77, + 264, + 501, + 430 + ], + "type": "inline_equation", + "content": "\\{\\{N\\}\\}" + }, + { + "bbox": [ + 77, + 264, + 501, + 430 + ], + "type": "text", + "content": " hypotheses that would be appropriate given the expressions. Provide short commentary for each of your decisions. Do not talk about topics related to the simplicity or complexity of the expressions. I want ideas that are unique and interesting enough to amaze the world's best mathematicians. End with a JSON list that enumerates the proposed hypotheses following this format: \n``'json \n[\"hyp1\", \"hyp2\", ... \"hyp.{N}]'' \n]" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 52, + 446, + 198, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 446, + 198, + 458 + ], + "spans": [ + { + "bbox": [ + 52, + 446, + 198, + 458 + ], + "type": "text", + "content": "5. LLM Concept Evolution prompt." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 77, + 465, + 511, + 495 + ], + "blocks": [ + { + "bbox": [ + 77, + 465, + 511, + 495 + ], + "lines": [ + { + "bbox": [ + 77, + 465, + 511, + 495 + ], + "spans": [ + { + "bbox": [ + 77, + 465, + 511, + 495 + ], + "type": "text", + "content": " You are an insightful assistant skilled in logical reasoning and deduction. Your task is to analyze a set of ideas and infer nontrivial conclusions that logically follow from them. The ultimate goal is to uncover underlying principles or properties of the hidden expressions. Focus on providing logical conclusions that are unique, interesting, and profound." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 77, + 499, + 511, + 624 + ], + "blocks": [ + { + "bbox": [ + 77, + 499, + 511, + 624 + ], + "lines": [ + { + "bbox": [ + 77, + 499, + 511, + 624 + ], + "spans": [ + { + "bbox": [ + 77, + 499, + 511, + 624 + ], + "type": "text", + "content": " \nIdea 1:{ideal} \nIdea 2:{idea2} \nIdea 3:{idea3} \nIdea 4:{idea4} \nIdea 5:{idea5} \nBased on these ideas, deduce " + }, + { + "bbox": [ + 77, + 499, + 511, + 624 + ], + "type": "inline_equation", + "content": "\\{\\{N\\}\\}" + }, + { + "bbox": [ + 77, + 499, + 511, + 624 + ], + "type": "text", + "content": " logical conclusions or hypotheses that directly follow from them. Provide a brief explanation for each conclusion, highlighting the logical connections between the ideas. Avoid discussing topics related to the simplicity or complexity of the expressions. Conclude with a JSON list that enumerates the proposed conclusions in the following format: \n``'json \n[\"Conclusion 1\", \"Conclusion 2\", ... 
\"Conclusion {{N}}]\" \n]" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 53, + 645, + 107, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 645, + 107, + 656 + ], + "spans": [ + { + "bbox": [ + 53, + 645, + 107, + 656 + ], + "type": "text", + "content": "C.2.3. SGA" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 664, + 544, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 664, + 544, + 698 + ], + "spans": [ + { + "bbox": [ + 52, + 664, + 544, + 698 + ], + "type": "text", + "content": "The following prompts are used in our implementation of SGA (Ma et al., 2024) for scientific equation discovery tasks, following the original implementation SGA's public code repository (https://github.com/PingchuanMa/SGA), which includes:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 706, + 151, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 706, + 151, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 706, + 151, + 717 + ], + "type": "text", + "content": "System prompt for task." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 77, + 76, + 394, + 141 + ], + "blocks": [ + { + "bbox": [ + 77, + 76, + 394, + 141 + ], + "lines": [ + { + "bbox": [ + 77, + 76, + 394, + 141 + ], + "spans": [ + { + "bbox": [ + 77, + 76, + 394, + 141 + ], + "type": "text", + "content": "You are an intelligent AI assistant for coding and scientific equation discovery. \nYou are tasked with discovering mathematical function structures for scientific systems. \nFollow the user's requirements carefully and make sure you understand them. \nKeep your answers short and to the point. \nDo not provide any information that is not requested.. \nAlways document your code as comments to explain the reason behind them. \nUse Markdown to format your solution. \nYou are very familiar with Python and PyTorch. \nDo not use any external libraries other than the libraries used in the examples." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + }, + { + "bbox": [ + 52, + 155, + 304, + 167 + ], + "lines": [ + { + "bbox": [ + 52, + 155, + 304, + 167 + ], + "spans": [ + { + "bbox": [ + 52, + 155, + 304, + 167 + ], + "type": "text", + "content": "Code formatting prompt for scientific equation discovery task." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 77, + 178, + 503, + 425 + ], + "blocks": [ + { + "bbox": [ + 77, + 178, + 503, + 425 + ], + "lines": [ + { + "bbox": [ + 77, + 178, + 503, + 425 + ], + "spans": [ + { + "bbox": [ + 77, + 178, + 503, + 425 + ], + "type": "text", + "content": "## PyTorch Tips\n1. When working with tensors, always use PyTorch's operators (such as 'torch.exp', 'torch.cos', 'torch.sqrt', ...) to ensure compatibility and optimal performance.\n2. In PyTorch, operator input arguments must be tensors, not floats.\n## Code Requirements\n1. The only library allowed is PyTorch. Follow the format provided by the user examples.\n2. Annotate the size of the tensor as comment after each tensor operation. For example, # (B, 3, 3).\n3. Separate the code into parameters that can be tuned with differentiable optimization and the symbolic expression represented by PyTorch code. Define them respectively in the\n5. The proposed code must strictly follow the structure and function signatures below:\n``'python\nimport torch\nimport torch(nn as nn)\nclass SymbolicEquation(nn.Module):\n def __init__(self, {PARAM_INPUTS}):\n Define trainable continuous parameters for differentiable optimization.\n Tentatively initialize the parameters with the default values in args.\n Args:\n {PARAM_DESCRIPTION}\n super().__init__()\n {PARAM_INIT}\n def forward(self, {INPUT_variables}) -> torch.Tensor:\n {FORWARD_FUNCTIONDescriptions}" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "code", + "bbox": [ + 77, + 431, + 508, + 467 + ], + "blocks": [ + { + "bbox": [ + 77, + 431, + 508, + 467 + ], + "lines": [ + { + "bbox": [ + 77, + 431, + 508, + 467 + ], + "spans": [ + { + "bbox": [ + 77, + 431, + 508, + 467 + ], + "type": "text", + "content": "1. 
Analyze step-by-step what the potential problem is in the previous iterations based on the feedback. Think about why the results from previous iterations mismatched with the ground truth. Do not give advice about how to optimize. Focus on the formulation of the scientific equation. Start this section with \"#Analysis\". Analyze all iterations individually, and start the subsection for each iteration with \"#Iteration N\", where N stands for the index. Remember to analyze every iteration in the history." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 76, + 473, + 511, + 509 + ], + "blocks": [ + { + "bbox": [ + 76, + 473, + 511, + 509 + ], + "lines": [ + { + "bbox": [ + 76, + 473, + 511, + 509 + ], + "spans": [ + { + "bbox": [ + 76, + 473, + 511, + 509 + ], + "type": "text", + "content": "2. Think step-by-step what you need to do in this iteration. Think about what is needed to improve performance. If the analysis suggests specific functional forms or constraints, think about how these will be incorporated into the symbolic equation. Think about how to separate your algorithm into a continuous parameter part and a symbolic expression model part. Describe your plan in pseudo-code, written out in great detail. Remember to update the default values of the trainable parameters based on previous optimizations. Start this section with \"# Step-by-Step Plan\"." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + }, + { + "bbox": [ + 52, + 543, + 230, + 555 + ], + "lines": [ + { + "bbox": [ + 52, + 543, + 230, + 555 + ], + "spans": [ + { + "bbox": [ + 52, + 543, + 230, + 555 + ], + "type": "text", + "content": "Context prompt for each scientific problem." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_caption" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 76, + 514, + 504, + 529 + ], + "blocks": [ + { + "bbox": [ + 76, + 514, + 504, + 529 + ], + "lines": [ + { + "bbox": [ + 76, + 514, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 76, + 514, + 504, + 529 + ], + "type": "text", + "content": "3. Output the code in a single code block ''``python ... ''`` with detailed comments in the code block. Do not add any trailing comments before or after the code block. Start this section with \"# Code\"." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 77, + 567, + 119, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 567, + 119, + 574 + ], + "spans": [ + { + "bbox": [ + 77, + 567, + 119, + 574 + ], + "type": "text", + "content": "# # # Context" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 582, + 511, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 582, + 511, + 624 + ], + "spans": [ + { + "bbox": [ + 76, + 582, + 511, + 624 + ], + "type": "text", + "content": "The objective is to construct a mathematical expression that accurately maps input variables to a target output based on a provided dataset. The task involves filling in a code block to define a symbolic expression or model that minimizes the difference between predicted and ground-truth outputs. The code block defines a class with two functions: one for parameters within the expression and another for generating or modifying the symbolic structure of the expression. Feedback is provided in the form of metrics measuring the error between the model's predictions and the ground-truth values, as well as guidance on structural improvements to the symbolic expression." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 629, + 348, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 629, + 348, + 639 + ], + "spans": [ + { + "bbox": [ + 77, + 629, + 348, + 639 + ], + "type": "text", + "content": "The expression represents " + }, + { + "bbox": [ + 77, + 629, + 348, + 639 + ], + "type": "inline_equation", + "content": "\\{" + }, + { + "bbox": [ + 77, + 629, + 348, + 639 + ], + "type": "text", + "content": " OUTPUT VAR DESC\\}, given data on " + }, + { + "bbox": [ + 77, + 629, + 348, + 639 + ], + "type": "inline_equation", + "content": "\\{" + }, + { + "bbox": [ + 77, + 629, + 348, + 639 + ], + "type": "text", + "content": " INPUTS DESC\\}." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 661, + 234, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 661, + 234, + 675 + ], + "spans": [ + { + "bbox": [ + 52, + 661, + 234, + 675 + ], + "type": "text", + "content": "D. Additional Results and Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 681, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 681, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 51, + 681, + 544, + 718 + ], + "type": "text", + "content": "Detailed Numeric Accuracy Analysis. While Table 1 presents median Normalized Mean Squared Error for each method-LLM combination across LLM-SRBench datasets, Figure 12 provides a more comprehensive view of error distributions across all samples. 
These box plots illustrate performance variations across LLM-SRBench datasets from two perspectives:" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 67, + 543, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 67, + 543, + 140 + ], + "spans": [ + { + "bbox": [ + 51, + 67, + 543, + 140 + ], + "type": "text", + "content": "comparing different equation discovery methods with GPT-4o-mini as the LLM backbone, and examining different LLM backbones when using LLM-SR method. The substantial variance in NMSE performance across samples reflects the diverse complexity inherent in our benchmark—stemming from both the varying mathematical transformations in LSR-Transform and the different combinations of known and synthetic terms in LSR-Synth datasets. Notably, the relative difficulty of datasets varies across methods and LLM backbones, suggesting that different methods and LLMs possess distinct capabilities in terms of leveraging domain knowledge, reasoning, and generating novel hypotheses." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 80, + 152, + 299, + 322 + ], + "blocks": [ + { + "bbox": [ + 80, + 152, + 299, + 322 + ], + "lines": [ + { + "bbox": [ + 80, + 152, + 299, + 322 + ], + "spans": [ + { + "bbox": [ + 80, + 152, + 299, + 322 + ], + "type": "image", + "image_path": "f5679f3c4d121b4ea5c20faf879d882e4987e7cc96f2c511c1b25316fcd262e4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 335, + 543, + 368 + ], + "lines": [ + { + "bbox": [ + 51, + 335, + 543, + 368 + ], + "spans": [ + { + "bbox": [ + 51, + 335, + 543, + 368 + ], + "type": "text", + "content": "Figure 12. Normalized Mean Squared Error (NMSE) of discovered equations in various domains of LLM-SRBench with respect to (left) different equation discovery methods using GPT-4omini LLM backbone, and (right) different LLM backbones using LLM-SR method" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 300, + 152, + 518, + 322 + ], + "blocks": [ + { + "bbox": [ + 300, + 152, + 518, + 322 + ], + "lines": [ + { + "bbox": [ + 300, + 152, + 518, + 322 + ], + "spans": [ + { + "bbox": [ + 300, + 152, + 518, + 322 + ], + "type": "image", + "image_path": "7287530c601c8f7a06f0551faaeaa9113407b59a179cd6bf6b36c8bedb772eb5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 387, + 543, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 387, + 543, + 472 + ], + "spans": [ + { + "bbox": [ + 51, + 387, + 543, + 472 + ], + "type": "text", + "content": "Symbolic Accuracy and Generalization. For scientific equation discovery methods, both symbolic accuracy and out-of-domain generalization serve as crucial evaluation metrics, reflecting the methods' ability to uncover true governing equations. 
Figure 13 examines the relationship between these metrics, plotting symbolic accuracy against both OOD accuracy and OOD NMSE across all method-LLM-domain combinations in LSR-Synth. The strong correlation observed between symbolic and OOD performance yields two important insights: first, it establishes OOD evaluation as a powerful metric for assessing the discovery of generalizable equations, an approach historically underutilized in symbolic regression; second, it validates our LLM-based symbolic evaluation approach through its strong alignment with numeric generalization performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 484, + 543, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 484, + 543, + 593 + ], + "spans": [ + { + "bbox": [ + 51, + 484, + 543, + 593 + ], + "type": "text", + "content": "Qualitative Analysis of Outputs. To provide deeper insights into the behavior of different discovery methods, Figure 14 illustrates their final discovered hypotheses on a biological population growth problem (BPG0) using Llama-3.1-8B as the LLM backbone. Direct Prompting (Figure 14(a)) generates equations that capture basic population dynamics, demonstrating LLMs' ability to propose scientifically plausible structures. SGA's solution (Figure 14(b)) successfully incorporates one of the common population growth terms while exploring additional structural components. LaSR (Figure 14(c)) discovers an equation structure that combines multiple interaction terms, though it differs from established scientific formulations. LLM-SR (Figure 14(d)) combines both standard population dynamics terms and synthetic components in its solution. These examples demonstrate the diverse approaches methods take in balancing scientific interpretability with mathematical expressiveness when discovering equation structures." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 607, + 241, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 607, + 241, + 620 + ], + "spans": [ + { + "bbox": [ + 52, + 607, + 241, + 620 + ], + "type": "text", + "content": "E. Discussion and Future Directions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 627, + 543, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 627, + 543, + 700 + ], + "spans": [ + { + "bbox": [ + 51, + 627, + 543, + 700 + ], + "type": "text", + "content": "Our findings from LLM-SRBench reveal several key insights that inform the design of future LLMs for scientific discovery applications. Scientific equation discovery remains a challenging problem for LLMs, requiring a complex interplay of domain knowledge, search capabilities with data-driven feedback, and mathematical manipulation skills. Our results demonstrate that this problem poses significant challenges for LLM-based discovery frameworks across different model architectures, suggesting that current approaches may be fundamentally limited in their ability to perform genuine scientific discovery." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 705, + 542, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 705, + 542, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 705, + 542, + 718 + ], + "type": "text", + "content": "This work questions the current evaluation paradigm for equation discovery in emerging LLM-based techniques. 
We" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 67, + 517, + 261 + ], + "blocks": [ + { + "bbox": [ + 78, + 67, + 517, + 261 + ], + "lines": [ + { + "bbox": [ + 78, + 67, + 517, + 261 + ], + "spans": [ + { + "bbox": [ + 78, + 67, + 517, + 261 + ], + "type": "image", + "image_path": "221b1498b3f8e47e6bf90ed1d149435f76b18381710e3d5b0115980fca2e9cd3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 94, + 275, + 496, + 286 + ], + "lines": [ + { + "bbox": [ + 94, + 275, + 496, + 286 + ], + "spans": [ + { + "bbox": [ + 94, + 275, + 496, + 286 + ], + "type": "text", + "content": "Figure 13. Symbolic Accuracy versus OOD performance over all domains, methods, and backbone LLM pairs." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "spans": [ + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "text", + "content": "demonstrate that existing benchmarks for this task are susceptible to memorization and inadequate for evaluating these techniques' true scientific discovery capabilities. Motivated by these limitations, we designed LLM-SRBench to address the memorization issue through two key innovations: synthetic imaginary scenarios (LSR-Synth category) that are not based on existing scientific knowledge and require data-driven discovery tools for solution, and transformed equations (LSR-Transform category) that convert common forms of scientifically known equations into less familiar formulations. The LSR-Synth category targets genuine innovation in LLM-based discovery techniques by eliminating the possibility of recalling memorized equations, while LSR-Transform problems are difficult to recite from memory and require reasoning over hypothesis generation steps, making them suitable candidates for evaluating recently emerging LLM-based scientific discovery agents. While the mathematical transformations in LSR-Transform are algebraically valid, their scientific meaningfulness varies considerably across contexts. Many transformations correspond to legitimate physics problems from the Feynman Lecture Series collection and represent alternative problem formulations with practical significance. 
For example, in the Harmonic Oscillator Energy problem, the original formulation " + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "inline_equation", + "content": "E = \\frac{1}{4} m(\\omega^2 + \\omega_0^2)x^2" + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "text", + "content": " expresses energy as a function of system parameters, while the transformed version " + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "inline_equation", + "content": "m = \\frac{4E}{(\\omega^2 + \\omega_0^2)x^2}" + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "text", + "content": " determines the mass required for given energy storage. This transformation maintains scientific meaning by addressing the engineering question of what mass is needed to store a specific amount of energy in an oscillating system, and such inversions are common in engineering design problems where system parameters must be determined to achieve desired performance characteristics. Similarly, the Electric Potential problem transforms from " + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "inline_equation", + "content": "V_e = \\frac{1}{4\\pi\\epsilon}\\frac{p_d\\cos(\\theta)}{r^2}" + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "text", + "content": " (potential at a point due to a dipole) to " + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "inline_equation", + "content": "r = \\sqrt{\\frac{p_d\\cos(\\theta)}{4\\pi\\epsilon V_e}}" + }, + { + "bbox": [ + 51, + 308, + 544, + 545 + ], + "type": "text", + "content": " (distance for a given potential), addressing the practical question of determining measurement distances in electrostatic experiments or sensor design." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 550, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 550, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 51, + 550, + 544, + 718 + ], + "type": "text", + "content": "However, not all transformations maintain clear physical interpretability. Some result in equations where the target variable appears in complex functional forms that may not correspond to natural physical questions, such as solving for angular frequency in oscillatory systems yielding expressions involving square roots of differences that lack intuitive physical meaning. Additionally, certain transformations may obscure natural causal relationships—transforming from “force causes acceleration” to “acceleration determines force” maintains mathematical validity but may not reflect underlying physical causality. The LSR-Transform category represents a deliberate balance between mathematical rigor and physical meaningfulness by constraining the complexity of transformed problems to match original problems, focusing on semantic rather than syntactic challenges in scientific equation discovery, while maintaining the original scientific context and variable meanings to ensure that underlying physics remains relevant even when mathematical formulation changes. The varying scientific meaningfulness of transformations reflects broader challenges in automated scientific discovery that warrant future investigation. Automated discovery systems must incorporate mechanisms to evaluate not only data-driven correctness but also scientific plausibility and interpretability of generated hypotheses, as mathematical validity alone is insufficient for meaningful scientific contribution. 
The most effective approach to scientific equation discovery likely involves close collaboration between AI systems, which excel at exploring vast hypothesis spaces, and human domain scientists, who can" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 68, + 543, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 68, + 543, + 152 + ], + "spans": [ + { + "bbox": [ + 51, + 68, + 543, + 152 + ], + "type": "text", + "content": "assess scientific meaningfulness and guide discovery directions based on deep contextual understanding. Future equation discovery methods could improve by incorporating literature retrieval tools to build grounding foundations for scientific context and domain knowledge, helping to prioritize discoveries that are mathematically valid, data-consistent, novel, and scientifically meaningful. The field needs evaluation frameworks that assess not just mathematical correctness but also scientific novelty, interpretability, and practical applicability of discovered equations, moving beyond narrow accuracy metrics toward a more comprehensive understanding of what constitutes valuable scientific discovery in the age of LLMs with their vast scientific knowledge." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 166, + 428, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 166, + 428, + 180 + ], + "spans": [ + { + "bbox": [ + 52, + 166, + 428, + 180 + ], + "type": "text", + "content": "F. Comparison with Standard (non-LLM) Symbolic Regression Baselines" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 186, + 543, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 186, + 543, + 296 + ], + "spans": [ + { + "bbox": [ + 51, + 186, + 543, + 296 + ], + "type": "text", + "content": "To further validate the utility of LLM-SRBench and demonstrate the advantages of LLM-based approaches, we conducted additional experiments comparing LLM-based methods with traditional symbolic regression techniques that do not incorporate domain knowledge. We evaluated PySR (Cranmer, 2023), a state-of-the-art symbolic regression method based on genetic programming, on all LLM-SRBench datasets. PySR operates purely on numerical data points without access to the scientific context, variable descriptions, or domain knowledge that LLM-based methods can leverage in discovery process. We used PySR's default configuration with the same computational budget (equivalent number of evaluations) as the LLM-based methods to ensure fair comparison. Table 3 presents the performance comparison between the best-performing LLM-based method from Table 1 and PySR across all LLM-SRBench datasets. The results reveal several key insights about the complementary strengths and limitations of non-LLM versus LLM-based approaches in equation discovery." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "spans": [ + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "text", + "content": "PySR demonstrates competitive and sometimes even better numerical accuracy " + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "inline_equation", + "content": "(\\mathrm{Acc}_{0.1})" + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "text", + "content": " across all datasets. However, PySR consistently shows significantly lower symbolic accuracy, particularly struggling with non-physics domains where it achieves " + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "inline_equation", + "content": "0\\%" + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "text", + "content": " symbolic accuracy on chemistry, biology, and material science datasets. The performance gap is most pronounced in problems that require specialized scientific knowledge. While PySR can fit mathematical patterns in the data, it lacks the scientific intuition to discover equations that align with established physical principles or domain-specific terminology. Interestingly, PySR shows relatively better performance on physics problems, achieving modest symbolic accuracy of " + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "inline_equation", + "content": "4.54\\%" + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "text", + "content": " on LSR-Synth Physics and " + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "inline_equation", + "content": "8.11\\%" + }, + { + "bbox": [ + 51, + 300, + 544, + 445 + ], + "type": "text", + "content": " on LSR-Transform (which is based on Feynman physics equations). This suggests that physics problems may contain mathematical patterns that are more aligned with the dictionary design in PySR. 
So they can be discovered better through the data-driven search pipeline designed in PySR. These findings strengthen the motivation for LLM-based scientific equation discovery and demonstrate that LLM-SRBench successfully captures challenges in equation discovery that traditional symbolic regression methods cannot adequately address through numerical data-driven optimization alone." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 103, + 491, + 493, + 588 + ], + "blocks": [ + { + "bbox": [ + 52, + 463, + 542, + 485 + ], + "lines": [ + { + "bbox": [ + 52, + 463, + 542, + 485 + ], + "spans": [ + { + "bbox": [ + 52, + 463, + 542, + 485 + ], + "type": "text", + "content": "Table 3. Performance comparison between LLM-based methods and state-of-the-art non-LLM symbolic regression baseline PySR on LLM-SRBench. SA = Symbolic Accuracy (%), Acc0.1 = Accuracy to tolerance 0.1 (%)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 103, + 491, + 493, + 588 + ], + "lines": [ + { + "bbox": [ + 103, + 491, + 493, + 588 + ], + "spans": [ + { + "bbox": [ + 103, + 491, + 493, + 588 + ], + "type": "table", + "html": "
Dataset (Metric)LLM-SR (best) SA / Acc0.1LaSR (best) SA / Acc0.1SGA (best) SA / Acc0.1PySR SA / Acc0.1
LSR-Transform31.53 / 39.6412.61 / 50.459.91 / 8.118.11 / 56.76
LSR-Synth Chemistry11.11 / 66.662.77 / 38.920 / 16.660 / 41.67
LSR-Synth Biology25.30 / 58.338.33 / 20.834.16 / 12.510 / 25.0
LSR-Synth Physics9.91 / 36.369.91 / 31.814.54 / 9.094.54 / 29.55
LSR-Synth Material Science20.24 / 88.2828.12 / 72.040 / 36.110 / 68.0
", + "image_path": "fb11f34bbb115c03e1386d66501627f890a455552d3792179b4cbe06566017dc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 58, + 627, + 538, + 703 + ], + "blocks": [ + { + "bbox": [ + 167, + 608, + 427, + 619 + ], + "lines": [ + { + "bbox": [ + 167, + 608, + 427, + 619 + ], + "spans": [ + { + "bbox": [ + 167, + 608, + 427, + 619 + ], + "type": "text", + "content": "Table 4: LSR-Synth mathematical equations for each scientific domain." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 627, + 538, + 703 + ], + "lines": [ + { + "bbox": [ + 58, + 627, + 538, + 703 + ], + "spans": [ + { + "bbox": [ + 58, + 627, + 538, + 703 + ], + "type": "table", + "html": "
DomainEquation IDEquation
ChemistryCKR1-kA(t)2+kzA(t)2/(βA(t)4+1)
CKR2-kA(t)2-kA(t)+kw cos(log(A(t)+1))
CKR3-kA(t)+kw cos(log(A(t)+1))
", + "image_path": "980926fc19c4a5dbe8a321ba11ada09393daa23e399bacfb2e5b2bb0db7cc03b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 433, + 706, + 531, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 706, + 531, + 718 + ], + "spans": [ + { + "bbox": [ + 433, + 706, + 531, + 718 + ], + "type": "text", + "content": "Continued on next page" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 83, + 537, + 696 + ], + "blocks": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "lines": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "spans": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "type": "text", + "content": "Table 4 - continued from previous page" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 83, + 537, + 696 + ], + "lines": [ + { + "bbox": [ + 59, + 83, + 537, + 696 + ], + "spans": [ + { + "bbox": [ + 59, + 83, + 537, + 696 + ], + "type": "table", + "html": "
DomainEquation IDEquation
CKR4-kA(t)2-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR5-kA(t)2+kqA(t) log(γt+1)
CKR6-k√(A(t)+kfA(t)0.33
CKR7-kA(t) exp(-ks t)+km sin(√A(t))
CKR8-kA(t) exp(-ks t)+kw cos(log(A(t)+1))
CKR9-kA(t)2-kA(t)+kt sin(log(A(t)+1))
CKR10-k√A(t)+kw cos(log(A(t)+1))
CKR11-kA(t)2+kt sin(log(A(t)+1))
CKR12-kA(t)2+km sin(√A(t))
CKR13-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR14-kA(t)+kp sin(ωA(t))
CKR15-k√A(t)-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR16-k√A(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR17-kA(t)+kfA(t)0.33
CKR18-kA(t) exp(-ks t)+kfA(t)0.33
CKR19-kA(t)2+kp sin(ωA(t))
CKR20-kA(t)2-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR21-kA(t) exp(-ks t)+kp sin(ωA(t))
CKR22-kA(t) exp(-ks t)+kqA(t) log(γt+1)
CKR23-kA(t)2-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR24-k√A(t)+kp sin(ωA(t))
CKR25-k√A(t)-kA(t)2+kfA(t)0.33
CKR26-kA(t)+kt sin(log(A(t)+1))
CKR27-kA(t)2-kA(t) exp(-ks t)+km sin(√A(t))
CKR28-kA(t)2-kA(t) exp(-ks t)+kfA(t)0.33
CKR29-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR30-kA(t)-kA(t) exp(-ks t)+kzA(t)2/(βA(t)4+1)
CKR31-kA(t)-kA(t) exp(-ks t)+kt sin(log(A(t)+1))
CKR32-k√A(t)-kA(t)+kw cos(log(A(t)+1))
CKR33-kA(t)-kA(t) exp(-ks t)+kfA(t)0.33
CKR34-k√A(t)-kA(t)2+kt sin(log(A(t)+1))
CKR35-kA(t)2+kfA(t)0.33
CKR36-kA(t)+kqA(t)log(γt+1)
", + "image_path": "49bbe1d88c7d1016d1299e0446c3a6af030be9f0f4775174147f6c3751655ae0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 433, + 700, + 531, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 700, + 531, + 712 + ], + "spans": [ + { + "bbox": [ + 433, + 700, + 531, + 712 + ], + "type": "text", + "content": "Continued on next page" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 83, + 537, + 691 + ], + "blocks": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "lines": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "spans": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "type": "text", + "content": "Table 4 - continued from previous page" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 83, + 537, + 691 + ], + "lines": [ + { + "bbox": [ + 59, + 83, + 537, + 691 + ], + "spans": [ + { + "bbox": [ + 59, + 83, + 537, + 691 + ], + "type": "table", + "html": "
DomainEquation IDEquation
BiologyBPG1r(1-P(t)/K0)P(t)+rP(t)0.33
BPG2rP(t)exp(-γt)+rP(t)2/(αP(t)+1)
BPG3βP(t)sin(ωt)+rP(t)exp(-γt)
BPG4r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG5r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG6r(1-P(t)/K0)P(t)+rP(t)2/(αP(t)+1)
BPG7-QαP(t)+r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG8r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)0.33
BPG9r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG10r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
BPG11rP(t)0.33+rP(t)
BPG12r(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)exp(-γt)
BPG13βP(t)sin(ωt)+r(1-P(t)/K0)P(t)
BPG14r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG15r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG16rP(t)0.33+rP(t)exp(-γt)
BPG17r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33+rP(t)
BPG18r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)0.33
BPG19βP(t)sin(ωt)+r(1-P(t)/K0)P(t)+rP(t)
BPG20r(1-P(t)/K0)P(t)+rP(t)/tα
BPG21r(-1+P(t)/α)(1-P(t)/K0)P(t)+r(1-P(t)/K0)P(t)+rP(t)/(1+exp(-α(-β+P(t))))
BPG22r(-1+P(t)/α)(1-P(t)/K0)P(t)+rP(t)/tα
BPG23r(1-exp(-γP(t)))P(t)+rP(t)exp(-γt)
BPG24r(1-P(t)/K0)P(t)+r(1-exp(-γP(t)))P(t)
PhysicsPO1F0sin(t)-βsin(v(t))-ω02x(t)3-ω02x(t)exp(-|x(t)|)
PO2F0sin(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO3-αv(t)3-μ(1-x(t)2)v(t)-ω02x(t)-ω02x(t)exp(-|x(t)|)
PO4F0sin(t)-βsin(v(t))-2βv(t)
PO5F0sin(t)-αv(t)3-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)
PO6-βsin(v(t))-2βv(t)-ω02(γ|v(t)|0.33+1)x(t)-ω02x(t)3-ω02x(t)
PO7-βlog(|v(t)|+1)-2βv(t)-ω02x(t)3
PO8-αv(t)3-β|v(t)|0.33-ω02x(t)3
", + "image_path": "39fd3cc32845900a08aae36a6154ecdf290fd268ff7603ccbdc4f582bcc25f59.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 433, + 694, + 531, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 694, + 531, + 706 + ], + "spans": [ + { + "bbox": [ + 433, + 694, + 531, + 706 + ], + "type": "text", + "content": "Continued on next page" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 83, + 537, + 696 + ], + "blocks": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "lines": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "spans": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "type": "text", + "content": "Table 4 - continued from previous page" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 83, + 537, + 696 + ], + "lines": [ + { + "bbox": [ + 59, + 83, + 537, + 696 + ], + "spans": [ + { + "bbox": [ + 59, + 83, + 537, + 696 + ], + "type": "table", + "html": "
DomainEquation IDEquation
PO9-β|v(t)|0.33 - ω02x(t)3
PO10F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)
PO11F0sin(t) - ω02(γt+1)x(t) - ω02x(t)3 - ω02x(t)
PO12-βsin(v(t)) - ω02(γt+1)x(t) - ω02x(t)3
PO13F0sin(t) - αv(t)3 - β|v(t)|0.33 - ω02(γt+1)x(t) - ω02x(t)
PO14F0sin(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO15F0sin(t) - βlog(|v(t)| + 1) - βsin(v(t)) - 2βv(t) - μ(1-x(t)2)v(t)
PO16F0sin(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t) - ω02x(t) exp(-|x(t)|)
PO17F0sin(t) - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO18F0sin(t) - βsin(x(t))v(t) - 2βv(t) - ω02x(t)
PO19-βsin(x(t))v(t) - ω02x(t)
PO20-2βv(t) - ω02x(t) exp(-|x(t)|)
PO21-αv(t)3 - β log(|v(t)| + 1) - 2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO22F0sin(t) - βsin(x(t))v(t)
PO23-2βv(t) - β exp(-|x(t)|)v(t) - μ(1-x(t)2)v(t) - ω02x(t)3
PO24F0sin(t) - βlog(|v(t)| + 1) - ω02x(t) exp(-|x(t)|)
PO25F0sin(t) - αv(t)3 - β log(|v(t)| + 1)
PO26F0sin(t) - βsin(v(t))
PO27F0sin(t) - βlog(|v(t)| + 1) - 2βv(t) - ω02x(t)3
PO28F0sin(t) - αv(t)3 - 2βv(t) - βexp(-|v(t)|)v(t)
PO29-2βv(t) - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
PO30-μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)3
PO31-αv(t)3 - βsin(x(t))v(t) - βsin(v(t)) - ω02x(t)3
PO32-ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3
PO33F0sin(t) - αv(t)3 - βexp(-|v(t)|)v(t) - ω02x(t)3
PO34-2βv(t) - μ(1-x(t)2)v(t) - ω02(γt+1)x(t) - ω02x(t)
PO35-2βv(t) - μ(1-x(t)2)v(t) - ω02(γ|v(t)|0.33 + 1)x(t)
PO36F0sin(t) - βsin(v(t)) - ω02(γ|v(t)|0.33 + 1)x(t)
PO37F0sin(t) - βexp(-|x(t)|)v(t)
PO38F0sin(t) - αv(t)3 - 2βv(t) - ω02(γt+1)x(t)
PO39-βsin(v(t)) - μ(1-x(t)2)v(t) - ω02x(t) exp(-|x(t)|)
PO40F0sin(t) - αv(t)3 - βexp(-|x(t)|)v(t) - μ(1-v(t)2)v(t)
PO41F0sin(t) - β|v(t)|0.33 - ω02(γ|v(t)|0.33 + 1)x(t) - ω02x(t)3 - ω02x(t)
", + "image_path": "f5d825a3634b5f016ef48b7a8e3de61dc5544258d05e1729a47a488b41937f63.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 433, + 700, + 531, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 700, + 531, + 711 + ], + "spans": [ + { + "bbox": [ + 433, + 700, + 531, + 711 + ], + "type": "text", + "content": "Continued on next page" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 304, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 304, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 304, + 740 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 83, + 537, + 606 + ], + "blocks": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "lines": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "spans": [ + { + "bbox": [ + 217, + 70, + 378, + 82 + ], + "type": "text", + "content": "Table 4 - continued from previous page" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 83, + 537, + 606 + ], + "lines": [ + { + "bbox": [ + 59, + 83, + 537, + 606 + ], + "spans": [ + { + "bbox": [ + 59, + 83, + 537, + 606 + ], + "type": "table", + "html": "
DomainEquation IDEquation
PO42-μ(1-x(t)2)v(t)-ω02x(t)exp(-|x(t)|)
PO43F0sin(t)-αv(t)3-βsin(x(t))v(t)-2βv(t)
PO44F0sin(t)-βsin(x(t))v(t)-2βv(t)-μ(1-x(t)2)v(t)-ω02x(t) exp(-|x(t)|)
MaterialMatSci1E0ε(-αT(T-T0)+1)-β(T-T0)+εMη(T-T0)
MatSci2Hε3+KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci3Hε3+η(T-T0)exp(-ε)
MatSci4Hε3+KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci5E0ε2+η(T-T0)log(ε+1)
MatSci6E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εMη(T-T0)
MatSci7E0ε(-αT(T-T0)+1)+εη(T-T0)2
MatSci8Hε3-β(T-T0)+η(T-T0)log(ε+1)
MatSci9E0ε(-αT(T-T0)+1)+εMη(T-T0)
MatSci10Hε3-β(T-T0)+ε3η(T-T0)
MatSci11Hε3+KεNexp(-Q/(RT))+εη(T-T0)2
MatSci12KεNexp(-Q/(RT))+ε3η(T-T0)
MatSci13E0ε(-αT(T-T0)+1)+KεNexp(-Q/(RT))+εηexp(-(T-T0)2)
MatSci14-β(T-T0)+εηexp(-(T-T0)2)
MatSci15-β(T-T0)+εMη(T-T0)
MatSci16E0ε(-αT(T-T0)+1)+εηexp(-(T-T0)2)
MatSci17E0ε2+εη(T-T0)2
MatSci18E0ε(-αT(T-T0)+1)-β(T-T0)+η(T-T0)log(ε+1)
MatSci19Hε3+η(T-T0)sin(ε)
MatSci20E0ε2-β(T-T0)+ε3η(T-T0)
MatSci21E0ε2+εηsin(T-T0)
MatSci22KεNexp(-Q/(RT))-β(T-T0)+η(T-T0)log(ε+1)
MatSci23E0ε(-αT(T-T0)+1)+Hε3+η(T-T0)sin(ε)
MatSci24KεNexp(-Q/(RT))+εηsin(T-T0)
MatSci25E0ε2+E0ε(-αT(T-T0)+1)+η(T-T0)log(ε+1)
", + "image_path": "4f27e9f084032a2c13ab16f8039019f7fcbfd78a412426da76d8a73012a4b88b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 118, + 178, + 479, + 338 + ], + "blocks": [ + { + "bbox": [ + 184, + 112, + 416, + 135 + ], + "lines": [ + { + "bbox": [ + 184, + 112, + 416, + 135 + ], + "spans": [ + { + "bbox": [ + 184, + 112, + 416, + 135 + ], + "type": "text", + "content": "Ground Truth: " + }, + { + "bbox": [ + 184, + 112, + 416, + 135 + ], + "type": "inline_equation", + "content": "\\frac{dP}{dt} = 0.9540\\left(1 - \\frac{P}{96.9069}\\right)P + 0.9540P^{\\frac{1}{3}}" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 126, + 157, + 261, + 168 + ], + "lines": [ + { + "bbox": [ + 126, + 157, + 261, + 168 + ], + "spans": [ + { + "bbox": [ + 126, + 157, + 261, + 168 + ], + "type": "text", + "content": "a) Predicted: Direct (Llama-3.1-8b)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 118, + 178, + 479, + 338 + ], + "lines": [ + { + "bbox": [ + 118, + 178, + 479, + 338 + ], + "spans": [ + { + "bbox": [ + 118, + 178, + 479, + 338 + ], + "type": "text", + 
"content": "def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray:\n ''' Mathematical function for Population growth rate\nArgs:\n t: A numpy array representing observations of Time.\n P: A numpy array representing observations of Population at time t.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs.\n''' output = (\n params[0] * P # A linear term based on population size\n + params[1] * P ** 2 # A squared term for birth or death ratio\n + params[2] * t + params[3] * P * t # Linear and linear cross term\n + params[4]\n)\nreturn output" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "code", + "bbox": [ + 115, + 384, + 482, + 671 + ], + "blocks": [ + { + "bbox": [ + 127, + 362, + 257, + 373 + ], + "lines": [ + { + "bbox": [ + 127, + 362, + 257, + 373 + ], + "spans": [ + { + "bbox": [ + 127, + 362, + 257, + 373 + ], + "type": "text", + "content": "b) Predicted: SGA (Llama-3.1-8b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 115, + 384, + 482, + 671 + ], + "lines": [ + { + "bbox": [ + 115, + 384, + 482, + 671 + ], + "spans": [ + { + "bbox": [ + 115, + 384, + 482, + 671 + ], + "type": "text", + "content": "class SymbolicEquation(nnModule):\n def __init__(self, r: float = 0.5, K: float = 100.0, time_scale: float = 0.05):\n Define trainable continuous parameters for differentiable optimization.\n Tentatively initialize the parameters with the default values in args.\n Params:\n r (float): growth rate.\n K (float): carrying capacity.\n time_scale (float): time-dependent coefficient.\n super().__init()\n self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) # (1,) \n self.K = nn_PARAMETER(torch.tensor(K, dtype=torch.float)) # (1,) \n self.time_scale 
= nn_PARAMETER(torch.tensor(time_scale, dtype=torch.float))\n # # (1,) \n # self.r = nn_PARAMETER(torch.tensor(r, dtype=torch.float)) \ndef forward(self, t: torch.Tensor, P: torch.Tensor) -> torch.Tensor:\n \"\"Mathematical function for Population growth rate\n Params:\n t: Time.\n P: Population at time t.\n Return:\n dP_dt: Population growth rate.\n \"\"\n # # (B,) \n dP_dt = self.r * torch.exp(-self.time_scale*t) * P * (1 - P / self.K) # (B,) \nreturn dP_dt" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 207, + 254, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 207, + 254, + 217 + ], + "spans": [ + { + "bbox": [ + 121, + 207, + 254, + 217 + ], + "type": "text", + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 228, + 476, + 250 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 228, + 476, + 250 + ], + "spans": [ + { + "bbox": [ + 115, + 228, + 476, + 250 + ], + "type": "interline_equation", + "content": "\\frac {d P}{d t} = \\left(- 9 1 0. 
3 1 - \\left(\\left(P \\cdot (P + \\sqrt {P})\\right) + \\log \\left((t + 4. 6 3 9 6) - (- 0. 6 6 0 9 5)\\right)\\right) \\cdot \\sin \\left(\\frac {- 0 . 9 5 5 7 2}{e ^ {t} - t}\\right)\\right) + 9 1 0. 3", + "image_path": "7967b1fc36cb281ebaa90a39635f07a107d1c6c1634b17b707e0936933c15249.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 284, + 263, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 284, + 263, + 294 + ], + "spans": [ + { + "bbox": [ + 120, + 284, + 263, + 294 + ], + "type": "text", + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 307, + 428, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 307, + 428, + 320 + ], + "spans": [ + { + "bbox": [ + 113, + 307, + 428, + 320 + ], + "type": "text", + "content": "def equation(t: np.ndarray, P: np.ndarray, params: np.ndarray) -> np.ndarray:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 113, + 323, + 472, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 323, + 472, + 340 + ], + "spans": [ + { + "bbox": [ + 113, + 323, + 472, + 340 + ], + "type": "text", + "content": "Mathematical function for Population growth rate with a logistic growth model, decay, and periodicity." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 126, + 349, + 148, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 349, + 148, + 355 + ], + "spans": [ + { + "bbox": [ + 126, + 349, + 148, + 355 + ], + "type": "text", + "content": "Args:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 357, + 350, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 357, + 350, + 364 + ], + "spans": [ + { + "bbox": [ + 143, + 357, + 350, + 364 + ], + "type": "text", + "content": "t: A numpy array representing observations of Time." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 365, + 415, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 365, + 415, + 373 + ], + "spans": [ + { + "bbox": [ + 143, + 365, + 415, + 373 + ], + "type": "text", + "content": "P: A numpy array representing observations of Population at time t." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 373, + 481, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 373, + 481, + 388 + ], + "spans": [ + { + "bbox": [ + 114, + 373, + 481, + 388 + ], + "type": "text", + "content": "params: Array of numeric constants or parameters to be optimized (k, M, r, a, b, A, w, tau)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 126, + 397, + 159, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 397, + 159, + 403 + ], + "spans": [ + { + "bbox": [ + 126, + 397, + 159, + 403 + ], + "type": "text", + "content": "Returns:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 405, + 465, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 405, + 465, + 421 + ], + "spans": [ + { + "bbox": [ + 113, + 405, + 465, + 421 + ], + "type": "text", + "content": "A numpy array representing Population growth rate as the result of applying the mathematical function to the inputs." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 126, + 421, + 140, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 421, + 140, + 426 + ], + "spans": [ + { + "bbox": [ + 126, + 421, + 140, + 426 + ], + "type": "text", + "content": "1 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 126, + 429, + 309, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 429, + 309, + 437 + ], + "spans": [ + { + "bbox": [ + 126, + 429, + 309, + 437 + ], + "type": "text", + "content": "Apply transformations to inputs (if needed)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 126, + 437, + 270, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 437, + 270, + 445 + ], + "spans": [ + { + "bbox": [ + 126, + 437, + 270, + 445 + ], + "type": "inline_equation", + "content": "t = t / np.\\max(t)" + }, + { + "bbox": [ + 126, + 437, + 270, + 445 + ], + "type": "text", + "content": " #Normalize time" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 126, + 445, + 294, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 445, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 126, + 445, + 294, + 453 + ], + "type": "text", + "content": "P = P / np.max(P) #Normalize population" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 126, + 462, + 318, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 462, + 318, + 469 + ], + "spans": [ + { + "bbox": [ + 126, + 462, + 318, + 469 + ], + "type": "text", + "content": "Compute the rates using logistic growth model" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 126, + 470, + 428, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 470, + 428, + 478 + ], + "spans": [ + { + "bbox": [ + 126, + 470, + 428, + 478 + ], + "type": "inline_equation", + "content": "\\mathrm{dP\\_dt} = \\mathrm{params}[0] * \\mathrm{P} * (1 - \\mathrm{P}) - \\mathrm{params}[1] 
* \\mathrm{P} - \\mathrm{params}[2] * \\mathrm{P}**\\mathrm{params}[3]" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 126, + 487, + 258, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 487, + 258, + 494 + ], + "spans": [ + { + "bbox": [ + 126, + 487, + 258, + 494 + ], + "type": "text", + "content": "Compute the periodicity factor" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 126, + 495, + 439, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 495, + 439, + 502 + ], + "spans": [ + { + "bbox": [ + 126, + 495, + 439, + 502 + ], + "type": "text", + "content": "periodic_factor = params[4] * np.sin(param[5] * (t - params[6]) + params[7])" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 126, + 511, + 279, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 511, + 279, + 518 + ], + "spans": [ + { + "bbox": [ + 126, + 511, + 279, + 518 + ], + "type": "text", + "content": "Combine growth rate and periodicity" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 126, + 519, + 323, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 519, + 323, + 527 + ], + "spans": [ + { + "bbox": [ + 126, + 519, + 323, + 527 + ], + "type": "inline_equation", + "content": "\\mathrm{dP\\_dt\\_with\\_periodicity} = \\mathrm{dP\\_dt} * \\mathrm{periodic\\_factor}" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 126, + 536, + 246, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 536, + 246, + 544 + ], + "spans": [ + { + "bbox": [ + 126, + 536, + 246, + 544 + ], + "type": "text", + "content": "return dP_dt_with_periodicity" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 51, + 564, + 541, + 586 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 564, + 541, + 586 + ], + "spans": [ + { + "bbox": [ + 51, + 564, + 541, + 586 + ], + "type": "text", + "content": "Figure 14. 
Example of output hypotheses from different LLM scientific equation discovery methods for BPG0 problem in LSR-Synth biology domain." + } + ] + } + ], + "index": 23, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 155, + 107, + 212, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 107, + 212, + 117 + ], + "spans": [ + { + "bbox": [ + 155, + 107, + 212, + 117 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 222, + 123, + 373, + 136 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 123, + 373, + 136 + ], + "spans": [ + { + "bbox": [ + 222, + 123, + 373, + 136 + ], + "type": "interline_equation", + "content": "0. 1 9 \\cdot A (t) + 0. 
1 9 \\cdot \\cos (\\log (A (t) + 1))", + "image_path": "b097fbf8c4d56eba060df42ecd3b49838fa4ada765fd0915a1a5c18f2c0c67bf.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 125, + 157, + 261, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 157, + 261, + 168 + ], + "spans": [ + { + "bbox": [ + 125, + 157, + 261, + 168 + ], + "type": "text", + "content": "a) Predicted: Direct (Llama-3.1-8b)" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 123, + 177, + 459, + 335 + ], + "blocks": [ + { + "bbox": [ + 123, + 177, + 459, + 335 + ], + "lines": [ + { + "bbox": [ + 123, + 177, + 459, + 335 + ], + "spans": [ + { + "bbox": [ + 123, + 177, + 459, + 335 + ], + "type": "text", + "content": "def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Rate of change of concentration (Growth, Reaction or Decay)\n Args:\n t: A numpy array representing observations of Time.\n A: A numpy array representing observations of Concentration at time t.\n params: Array of numeric constants or parameters to be optimized.\n - params[0]: k (Rate constant, e.g., 0.1 min^-1)\n - params[1]: C0 (initial concentration, e.g., 10) for decay or growth\n models\n Return:\n A numpy array representing Rate of change of concentration as the result of applying the mathematical function to the inputs.\n - equation implemented under assumption that k*A represents linear decay, or linear growth.\n output = params[0] * A + params[1] * params[2]\n return output" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 127, + 376, + 257, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 376, + 257, + 387 + ], + "spans": [ + { + "bbox": [ + 127, + 376, + 257, + 387 + ], + "type": "text", + "content": "b) Predicted: SGA (Llama-3.1-8b)" + } + ] + } + ], + "index": 5 + }, 
+ { + "type": "code", + "bbox": [ + 130, + 396, + 465, + 665 + ], + "blocks": [ + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "lines": [ + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": "class SymbolicEquation(nnModule): def init(self,k:float " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "= 9.00\\mathrm{e} - 02" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " A_0:float " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "= 9.60\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " A_t:float " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "= 9.60\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " .. Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. Args: k(float):time-dependent reaction rate constant. A_0(float):initial concentration. A_t(float):time-dependent change in concentration. 
super().__init_() #(1,) self.k " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " nn.Parameters(torch.tensor(k,dtype " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "\\coloneqq" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " torch.float)) self.A_0 " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " nn.Parameterrtorch.tensor(A_0,dtype " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "\\coloneqq" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " torch.float) # (1,) self.A_t " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " nn.Parameterrtorch.tensor(A_t,dtype " + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "inline_equation", + "content": "\\coloneqq" + }, + { + "bbox": [ + 130, + 396, + 465, + 665 + ], + "type": "text", + "content": " torch.float) # (1,) def forward(self,t:torch.Tensor,A:torch.Tensor) -> torch.Tensor: \"\"Mathematical function for Rate of change of concentration in chemistry reaction kinetics Args: t:Time.#(B,1) A:Concentration at time t.#(B,1) Return: dA_dt:Rate of change of concentration in chemistry reaction kinetics. 
(# (B,1)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 171, + 261, + 304, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 261, + 304, + 273 + ], + "spans": [ + { + "bbox": [ + 171, + 261, + 304, + 273 + ], + "type": "text", + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 154, + 281, + 438, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 281, + 438, + 297 + ], + "spans": [ + { + "bbox": [ + 154, + 281, + 438, + 297 + ], + "type": "interline_equation", + "content": "\\left(- 0. 0 0 4 0 4 0 2 / \\left(1. 3 2 2 2 ^ {t}\\right)\\right) * \\left(\\left(A ^ {t}\\right) ^ {\\cos \\left(\\left(\\left(t + \\left(\\left(A / (A / A)\\right) - 0. 0 4 3 4 7 7\\right)\\right) ^ {A}\\right) - A\\right) + \\left(\\left(0. 7 5 6 2 9 ^ {1. 2 0 2 8} t\\right)\\right)\\right)} \\left. 
\\right.", + "image_path": "3d6accc6d6374c2b7e89a67dfa6fd230f75a652a635a6c19664eb439522c7b18.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 331, + 263, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 331, + 263, + 342 + ], + "spans": [ + { + "bbox": [ + 119, + 331, + 263, + 342 + ], + "type": "text", + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 121, + 360, + 467, + 486 + ], + "blocks": [ + { + "bbox": [ + 121, + 360, + 467, + 486 + ], + "lines": [ + { + "bbox": [ + 121, + 360, + 467, + 486 + ], + "spans": [ + { + "bbox": [ + 121, + 360, + 467, + 486 + ], + "type": "text", + "content": "def equation(t: np.ndarray, A: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Rate of change of concentration in chemistry reaction kinetics\n Args:\n t: A numpy array representing observations of Time.\n A: A numpy array representing observations of Concentration at time t.\n params: Array of numeric constants or parameters to be optimized\n Return:\n A numpy array representing Rate of change of concentration in chemistry reaction kinetics as the result of applying the mathematical function to the inputs.\n output = params[0] * A + params[1] * A**2 + params[2] * A**3 + params[3] * t + params[4]\n return output" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 52, + 505, + 542, + 527 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 505, + 542, + 527 + ], + "spans": [ + { + "bbox": [ + 52, + 505, + 542, + 527 + ], + "type": "text", + "content": "Figure 15. Example of output hypotheses from different LLM scientific equation discovery methods for CKR2 problem in LSR-Synth chemistry domain." 
+ } + ] + } + ], + "index": 5, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 154, + 79, + 212, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 79, + 212, + 89 + ], + "spans": [ + { + "bbox": [ + 154, + 79, + 212, + 89 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 147, + 91, + 444, + 104 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 91, + 444, + 104 + ], + "spans": [ + { + "bbox": [ + 147, + 91, + 444, + 104 + ], + "type": "interline_equation", + "content": "3 1. 1 2 \\cdot \\varepsilon \\cdot (- 0. 0 5 \\cdot (T - 3 1 6. 1 9) + 1) - 0. 2 3 \\cdot (T - 3 1 6. 1 9) + \\varepsilon^ {0. 6 0} \\cdot 3. 6 3 \\cdot (T - 3 1 6. 
1 9)", + "image_path": "deefd5b634e32daa868eb85f29a5385e1c0ea66ff650356d310d88c0ea387d68.jpg" + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 123, + 149, + 452, + 322 + ], + "blocks": [ + { + "bbox": [ + 125, + 130, + 260, + 141 + ], + "lines": [ + { + "bbox": [ + 125, + 130, + 260, + 141 + ], + "spans": [ + { + "bbox": [ + 125, + 130, + 260, + 141 + ], + "type": "text", + "content": "a) Predicted: Direct (Llama-3.1-8b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 123, + 149, + 452, + 322 + ], + "lines": [ + { + "bbox": [ + 123, + 149, + 452, + 322 + ], + "spans": [ + { + "bbox": [ + 123, + 149, + 452, + 322 + ], + "type": "text", + "content": "def equation(eppsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Stress\nArgs:\n epsilon: A numpy array representing observations of Strain.\n T: A numpy array representing observations of Temperature.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Stress as the result of applying the mathematical function to the inputs.\n \"\"\n reference_temperature = 0\n sigma_0 = params[0]\n alpha = params[1]\n n = params[2]\n linear_epsilon_term = params[3] * epsilon\n multiplicative_epsilon_term = params[4] * epsilon\n linear_temperature_term = params[5] * T\n output = sigma_0 * (1 + alpha * (T - reference_temperature)) * epsilon**\n linear_epsilon_term + multiplicative_epsilon_term + linear_temperature_term\n return output" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "code", + "bbox": [ + 130, + 383, + 449, + 695 + ], + "blocks": [ + { + "bbox": [ + 127, + 354, + 257, + 365 + ], + "lines": [ + { + "bbox": [ + 127, + 354, + 257, + 365 + ], + "spans": [ + { + "bbox": [ + 127, + 354, + 257, + 365 + ], + "type": "text", + "content": "b) Predicted: 
SGA (Llama-3.1-8b)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "lines": [ + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "spans": [ + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": "class SymbolicEquation(nnModule): def__init__(self,c_0:float " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "= 9.83\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " ,c_1:float " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "= 9.86\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " ,c_2:float " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "= 9.83\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " a:float " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "= -3.73\\mathrm{e} - 03" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " ,b:float " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "= -1.11\\mathrm{e} - 02" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " ,d:float " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "= 2.63\\mathrm{e} - 02" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " : Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args. \nArgs: c_0(float): coefficient. c_1(float): coefficient. c_2(float): coefficient. a(float): coefficient for quadratic term. b(float): coefficient for exponential term. 
d(float): coefficient for non-linear relationship between Strain and Temperature. super()._init_(self.c_0=nn.Parameters(torch.tensor(c_0,dtype " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " torch.float)) # (1,) self.c_1 = nn.Parameter(torch.tensor(c_1,dtype " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " torch.float)) # (1,) self.c_2 = nn.Parameter(torch.tensor(c_2,dtype " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " torch.float)) # (1,) self.a = nn.Parameter(torch.tensor(a,dtype " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " torch.float)) # (1,) self.b = nn.Parameter(torch.tensor(b,dtype " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " torch.float)) # (1,) self.d = nn.Parameter(torch.tensor(d,dtype " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " torch.float)) # (1,) \ndef forward(self,epsilon:torch.Tensor,T:torch.Tensor) -> torch.Tensor:\"\"\"Mathematical function for Stress \nArgs: epsilon:Strain.#(B,1) T:Temperature.#(B,1) \nReturn: sigma:Stress.#(B,1) \"\"\" # (B,1) sigma " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 130, + 383, 
+ 449, + 695 + ], + "type": "text", + "content": " self.c_0 " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " self.c_1 \\* torch.exp(self.a \\* epsilon " + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 130, + 383, + 449, + 695 + ], + "type": "text", + "content": " self.b \\*T)+ self.c_2 \\* torch.cos(self.d \\* epsilon \\*T) return sigma" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 172, + 257, + 306, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 257, + 306, + 268 + ], + "spans": [ + { + "bbox": [ + 172, + 257, + 306, + 268 + ], + "type": "text", + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 150, + 274, + 446, + 296 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 274, + 446, + 296 + ], + "spans": [ + { + "bbox": [ + 150, + 274, + 446, + 296 + ], + "type": "interline_equation", + 
"content": "\\left(\\left(\\sqrt {\\epsilon} \\cdot (- 7 1 2. 3 6) + \\left(\\left(\\frac {\\sqrt {\\epsilon}}{1 . 3 7 9 2 ^ {\\epsilon}} \\cdot 2. 2 7 9 8\\right) \\cdot (T - \\epsilon) + \\epsilon\\right) + 6. 8 1 2 5\\right) \\cdot 1. 5 0 7 6 ^ {\\sqrt {\\epsilon}}\\right) - \\sin (\\log (T))", + "image_path": "45452e4a8a849977e88075f5d626da8a4d10baf79ead0ecba98829cb4fc616f2.jpg" + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 123, + 350, + 469, + 485 + ], + "blocks": [ + { + "bbox": [ + 120, + 323, + 263, + 335 + ], + "lines": [ + { + "bbox": [ + 120, + 323, + 263, + 335 + ], + "spans": [ + { + "bbox": [ + 120, + 323, + 263, + 335 + ], + "type": "text", + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 123, + 350, + 469, + 485 + ], + "lines": [ + { + "bbox": [ + 123, + 350, + 469, + 485 + ], + "spans": [ + { + "bbox": [ + 123, + 350, + 469, + 485 + ], + "type": "text", + "content": "def equation(epsilon: np.ndarray, T: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Stress\nArgs:\n epsilon: A numpy array representing observations of Strain.\n T: A numpy array representing observations of Temperature.\n params: Array of numeric constants or parameters to be optimized\nReturn:\n A numpy array representing Stress as the result of applying the mathematical function to the inputs.\n>>> output = params[0] * (epsilon ** (1/params[4]) - 1) + \\\n params[1] * (epsilon ** (1/params[6]) - 1) + \\\n params[2] * (epsilon ** (1/params[8]) - 1) * np.exp.params[3] * epsilon) + \\\n params[5] * epsilon + params[6] * T + params[7]\nreturn output" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 52, + 511, + 541, + 533 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 511, + 541, + 533 + ], + "spans": [ + { + "bbox": [ 
+ 52, + 511, + 541, + 533 + ], + "type": "text", + "content": "Figure 16. Example of output hypotheses from different LLM scientific equation discovery methods for MatSci0 problem in LSR-Synth material science domain." + } + ] + } + ], + "index": 5, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 155, + 93, + 212, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 93, + 212, + 103 + ], + "spans": [ + { + "bbox": [ + 155, + 93, + 212, + 103 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 178, + 106, + 422, + 122 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 106, + 422, + 122 + ], + "spans": [ + { + "bbox": [ + 178, + 106, + 422, + 122 + ], + "type": "interline_equation", + "content": "F _ {0} \\cdot \\sin (t) - \\beta \\cdot \\sin (v (t)) - \\omega_ {0} ^ {2} \\cdot x (t) ^ {3} - \\omega_ {0} ^ {2} \\cdot x (t) \\cdot e ^ {- | x (t) |}", + "image_path": "e74e4296a0117c3464660b21e81f5f6ba6c0fe69bfcfaa4bb89adf654b5ed093.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 125, + 144, + 261, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 144, + 261, + 155 + ], + "spans": [ + { 
+ "bbox": [ + 125, + 144, + 261, + 155 + ], + "type": "text", + "content": "a) Predicted: Direct (Llama-3.1-8b)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 123, + 163, + 441, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 163, + 441, + 179 + ], + "spans": [ + { + "bbox": [ + 123, + 163, + 441, + 179 + ], + "type": "text", + "content": "def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 135, + 179, + 303, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 179, + 303, + 186 + ], + "spans": [ + { + "bbox": [ + 135, + 179, + 303, + 186 + ], + "type": "text", + "content": "\"\"Mathematical function for Acceleration" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 135, + 195, + 156, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 195, + 156, + 202 + ], + "spans": [ + { + "bbox": [ + 135, + 195, + 156, + 202 + ], + "type": "text", + "content": "Args:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 150, + 203, + 408, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 203, + 408, + 210 + ], + "spans": [ + { + "bbox": [ + 150, + 203, + 408, + 210 + ], + "type": "text", + "content": "x: A numpy array representing observations of Position at time t." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 151, + 211, + 352, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 211, + 352, + 218 + ], + "spans": [ + { + "bbox": [ + 151, + 211, + 352, + 218 + ], + "type": "text", + "content": "t: A numpy array representing observations of Time." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 151, + 219, + 408, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 219, + 408, + 226 + ], + "spans": [ + { + "bbox": [ + 151, + 219, + 408, + 226 + ], + "type": "text", + "content": "v: A numpy array representing observations of Velocity at time t." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 151, + 227, + 405, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 227, + 405, + 234 + ], + "spans": [ + { + "bbox": [ + 151, + 227, + 405, + 234 + ], + "type": "text", + "content": "params: Array of numeric constants or parameters to be optimized" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 135, + 243, + 164, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 243, + 164, + 249 + ], + "spans": [ + { + "bbox": [ + 135, + 243, + 164, + 249 + ], + "type": "text", + "content": "Return:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 123, + 250, + 425, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 250, + 425, + 265 + ], + "spans": [ + { + "bbox": [ + 123, + 250, + 425, + 265 + ], + "type": "text", + "content": "A numpy array representing Acceleration as the result of applying the mathematical function to the inputs." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 135, + 266, + 149, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 266, + 149, + 270 + ], + "spans": [ + { + "bbox": [ + 135, + 266, + 149, + 270 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 135, + 273, + 408, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 273, + 408, + 281 + ], + "spans": [ + { + "bbox": [ + 135, + 273, + 408, + 281 + ], + "type": "text", + "content": "Since acceleration is Force/mass and Force is mass*velocity change," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 135, + 281, + 259, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 281, + 259, + 289 + ], + "spans": [ + { + "bbox": [ + 135, + 281, + 259, + 289 + ], + "type": "text", + "content": "we represent this equation as" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 135, + 290, + 469, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 290, + 469, + 304 + ], + "spans": [ + { + "bbox": [ + 135, + 290, + 469, + 304 + ], + "type": "text", + "content": "a = dv/dt or in our representation format, given our position x it will correspond" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 123, + 304, + 465, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 304, + 465, + 320 + ], + "spans": [ + { + "bbox": [ + 123, + 304, + 465, + 320 + ], + "type": "text", + "content": "a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 123, + 321, + 430, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 321, + 430, + 336 + ], + "spans": [ + { + "bbox": [ + 123, + 321, + 430, + 336 + ], + "type": "text", + "content": "output = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + 
params[4]) + params[5]*v**2 + params[6]*t + params[7]*(x**2 + v)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 135, + 337, + 189, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 337, + 189, + 344 + ], + "spans": [ + { + "bbox": [ + 135, + 337, + 189, + 344 + ], + "type": "text", + "content": "return output" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 127, + 367, + 258, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 367, + 258, + 378 + ], + "spans": [ + { + "bbox": [ + 127, + 367, + 258, + 378 + ], + "type": "text", + "content": "b) Predicted: SGA (Llama-3.1-8b)" + } + ] + } + ], + "index": 20 + }, + { + "type": "code", + "bbox": [ + 130, + 387, + 449, + 424 + ], + "blocks": [ + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "lines": [ + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "spans": [ + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "text", + "content": "class SymbolicEquation(nnModule): def init(self,c_0: float " + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "inline_equation", + "content": "= 9.50\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "text", + "content": " ,c_1: float " + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "inline_equation", + "content": "= 9.50\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "text", + "content": " ,c_2: float " + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "inline_equation", + "content": "= 9.50\\mathrm{e} - 01" + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "text", + "content": " ,c_3: float " + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "inline_equation", + "content": "= 1.05\\mathrm{e} + 00" + }, + { + "bbox": [ + 130, + 387, + 449, + 424 + ], + "type": "text", + "content": " :" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "code_body" + } + ], + "index": 21, + 
"sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 157, + 427, + 439, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 427, + 439, + 443 + ], + "spans": [ + { + "bbox": [ + 157, + 427, + 439, + 443 + ], + "type": "text", + "content": "Define trainable continuous parameters for differentiable optimization. Tentatively initialize the parameters with the default values in args." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 159, + 451, + 179, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 451, + 179, + 457 + ], + "spans": [ + { + "bbox": [ + 159, + 451, + 179, + 457 + ], + "type": "text", + "content": "Args:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 175, + 458, + 274, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 458, + 274, + 465 + ], + "spans": [ + { + "bbox": [ + 175, + 458, + 274, + 465 + ], + "type": "text", + "content": "c_0(float): coefficient." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 175, + 466, + 272, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 466, + 272, + 472 + ], + "spans": [ + { + "bbox": [ + 175, + 466, + 272, + 472 + ], + "type": "text", + "content": "c 1(float): coefficient." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 175, + 473, + 274, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 473, + 274, + 479 + ], + "spans": [ + { + "bbox": [ + 175, + 473, + 274, + 479 + ], + "type": "text", + "content": "c_2(float); coefficient." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 175, + 481, + 274, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 481, + 274, + 488 + ], + "spans": [ + { + "bbox": [ + 175, + 481, + 274, + 488 + ], + "type": "text", + "content": "c 3 (float): coefficient." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 159, + 490, + 173, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 490, + 173, + 495 + ], + "spans": [ + { + "bbox": [ + 159, + 490, + 173, + 495 + ], + "type": "text", + "content": "1 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 159, + 497, + 431, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 497, + 431, + 512 + ], + "spans": [ + { + "bbox": [ + 159, + 497, + 431, + 512 + ], + "type": "text", + "content": "super().__init_(self.c 0 = nn.Parameter(torch.tensor(c 0, dtype=torch.float)) # (1,)" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 159, + 513, + 430, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 513, + 430, + 519 + ], + "spans": [ + { + "bbox": [ + 159, + 513, + 430, + 519 + ], + "type": "text", + "content": "self.c1 = nn_PARAMETER(torch.tensor(c1, dtype=torch.float)) # (1,)" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 159, + 521, + 430, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 521, + 430, + 528 + ], + "spans": [ + { + "bbox": [ + 159, + 521, + 430, + 528 + ], + "type": "text", + "content": "self.c_2 = nn_PARAMETER(torch:tensor(c_2, dtype=torch.float)) # (1,)" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 159, + 529, + 430, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 529, + 430, + 536 + ], + "spans": [ + { + "bbox": [ + 159, + 529, + 430, + 536 + ], + "type": "text", + "content": "self.c_3 = nn_PARAMETER(torch.tensor(c_3, dtype=torch.float)) # (1,)" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 143, + 551, + 425, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 551, + 425, + 567 + ], + "spans": [ + { + "bbox": [ + 143, + 551, + 425, + 567 + ], + "type": "text", + "content": "def forward(self, x: torch.Tensor, t: torch.Tensor, v: torch.Tensor) -> 
torch.Tensor:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 159, + 567, + 460, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 567, + 460, + 574 + ], + "spans": [ + { + "bbox": [ + 159, + 567, + 460, + 574 + ], + "type": "text", + "content": "\"\"Mathematical function for Acceleration in Nonl-linear Harmonic Oscillator" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 159, + 583, + 179, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 583, + 179, + 590 + ], + "spans": [ + { + "bbox": [ + 159, + 583, + 179, + 590 + ], + "type": "text", + "content": "Args:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 175, + 591, + 293, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 591, + 293, + 597 + ], + "spans": [ + { + "bbox": [ + 175, + 591, + 293, + 597 + ], + "type": "text", + "content": "x: Position at time t. # (B,)" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 175, + 599, + 238, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 599, + 238, + 605 + ], + "spans": [ + { + "bbox": [ + 175, + 599, + 238, + 605 + ], + "type": "text", + "content": "t: Time. # (B,)" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 175, + 607, + 293, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 607, + 293, + 614 + ], + "spans": [ + { + "bbox": [ + 175, + 607, + 293, + 614 + ], + "type": "text", + "content": "v: Velocity at time t. 
# (B,)" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 159, + 623, + 187, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 623, + 187, + 629 + ], + "spans": [ + { + "bbox": [ + 159, + 623, + 187, + 629 + ], + "type": "text", + "content": "Return:" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 175, + 630, + 423, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 630, + 423, + 637 + ], + "spans": [ + { + "bbox": [ + 175, + 630, + 423, + 637 + ], + "type": "text", + "content": "dv dt: Acceleration in Nonl-linear Harmonic Oscillator. # (B,)" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 159, + 638, + 172, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 638, + 172, + 643 + ], + "spans": [ + { + "bbox": [ + 159, + 638, + 172, + 643 + ], + "type": "text", + "content": "1 1" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 159, + 645, + 338, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 645, + 338, + 653 + ], + "spans": [ + { + "bbox": [ + 159, + 645, + 338, + 653 + ], + "type": "text", + "content": "Non-linear relationship between x, t, and v" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 123, + 654, + 463, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 654, + 463, + 669 + ], + "spans": [ + { + "bbox": [ + 123, + 654, + 463, + 669 + ], + "type": "text", + "content": "dv_dt = self.c_0 * torch.exp(-self.c_1 * x) + self.c_2 * torch.cos(self.c_3 * t + self.c_3 * x) # (B, )" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 159, + 670, + 208, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 670, + 208, + 677 + ], + "spans": [ + { + "bbox": [ + 159, + 670, + 208, + 677 + ], + "type": "text", + "content": "return dv_dt" + } + ] + } + ], + "index": 44 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 45 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 171, + 228, + 304, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 228, + 304, + 239 + ], + "spans": [ + { + "bbox": [ + 171, + 228, + 304, + 239 + ], + "type": "text", + "content": "c) Predicted: LaSR (Llama-3.1-8b)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 225, + 247, + 373, + 271 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 247, + 373, + 271 + ], + "spans": [ + { + "bbox": [ + 225, + 247, + 373, + 271 + ], + "type": "interline_equation", + "content": "\\left(\\frac {\\left((x + x) \\cdot \\left(\\left(\\frac {\\sqrt {x ^ {\\mathrm {l i n e s}} - \\frac {x}{\\sin x}}}{x}\\right) \\cdot \\sin (1 . 1 4 7 8 \\cdot t) - x\\right) \\cdot \\sin (x)\\right)}{1 . 7 0 5 2} - \\sin (0. 
0 0 3 2 8 2 7)\\right)", + "image_path": "d03139fa8f37c0317378e921fd4d64d969658d30eed59db871fb79cadd984a89.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 301, + 263, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 301, + 263, + 312 + ], + "spans": [ + { + "bbox": [ + 119, + 301, + 263, + 312 + ], + "type": "text", + "content": "d) Predicted: LLM-SR (Llama-3.1-8b)" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 123, + 327, + 441, + 350 + ], + "blocks": [ + { + "bbox": [ + 123, + 327, + 441, + 350 + ], + "lines": [ + { + "bbox": [ + 123, + 327, + 441, + 350 + ], + "spans": [ + { + "bbox": [ + 123, + 327, + 441, + 350 + ], + "type": "text", + "content": "def equation(x: np.ndarray, t: np.ndarray, v: np.ndarray, params: np.ndarray) -> np.ndarray:\n \"\" Mathematical function for Acceleration" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 135, + 358, + 156, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 358, + 156, + 365 + ], + "spans": [ + { + "bbox": [ + 135, + 358, + 156, + 365 + ], + "type": "text", + "content": "Args:" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 150, + 367, + 408, + 398 + ], + "blocks": [ + { + "bbox": [ + 150, + 367, + 408, + 398 + ], + "lines": [ + { + "bbox": [ + 150, + 367, + 408, + 398 + ], + "spans": [ + { + "bbox": [ + 150, + 367, + 408, + 398 + ], + "type": "text", + "content": "x: A numpy array representing observations of Position at time t. \nt: A numpy array representing observations of Time. \nv: A numpy array representing observations of Velocity at time t. 
\nparams: Array of numeric constants or parameters to be optimized" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 135, + 405, + 163, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 405, + 163, + 411 + ], + "spans": [ + { + "bbox": [ + 135, + 405, + 163, + 411 + ], + "type": "text", + "content": "Return:" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 123, + 413, + 476, + 507 + ], + "blocks": [ + { + "bbox": [ + 123, + 413, + 476, + 507 + ], + "lines": [ + { + "bbox": [ + 123, + 413, + 476, + 507 + ], + "spans": [ + { + "bbox": [ + 123, + 413, + 476, + 507 + ], + "type": "text", + "content": "A numpy array representing Acceleration as the result of applying the mathematical function to the inputs. \n```python\n```\n# Since acceleration is Force/mass and Force is mass*velocity change,\n# we represent this equation as\n# a = dv/dt or in our representation format, given our position x it will correspond to\n# a = d^2x/dt^2 or by simple numeric analogy 4th order approximation involving high degree dependency functions:\noutput = (params[0]*x**4 + params[1]*x**3 + params[2]*x**2 + params[3]*x + params[4])\n+ params[5]*v**2 + params[6]*t + params[7]*(x**2 + v)\nreturn output" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 52, + 538, + 542, + 561 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 538, + 542, + 561 + ], + "spans": [ + { + "bbox": [ + 52, + 538, + 542, + 561 + ], + "type": "text", + "content": "Figure 17. Example of output hypotheses from different LLM scientific equation discovery methods for PO0 problem in LSR-Synth physics domain." 
+ } + ] + } + ], + "index": 9, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "spans": [ + { + "bbox": [ + 105, + 45, + 489, + 56 + ], + "type": "text", + "content": "LLM-SRBench: A New Benchmark for Scientific Equation Discovery with Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_content_list.json b/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..43512405746ed87a0649b06c11c447bbf84908ed --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_content_list.json @@ -0,0 +1,2315 @@ +[ + { + "type": "text", + "text": "RealWebAssist: A Benchmark for Long-Horizon Web Assistance with Real-World Users", + "text_level": 1, + "bbox": [ + 86, + 119, + 910, + 161 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Suyu Ye\\*, Haojun Shi\\*, Darren Shih 1, Hyokun Yun 2, Tanya G. 
Roosta 2, Tianmin Shu", + "bbox": [ + 124, + 172, + 875, + 193 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Johns Hopkins University,", + "bbox": [ + 406, + 195, + 589, + 210 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2Amazon.com", + "bbox": [ + 449, + 210, + 547, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{sye10, hshi33, dshih5, tianmin.shu}@jhu.edu, {yunhyoku,troosta} $@$ amazon.com", + "bbox": [ + 225, + 223, + 771, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 273, + 313, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To achieve successful assistance with long-horizon web-based tasks, AI agents must be able to sequentially follow real-world user instructions over a long period. Unlike existing web-based agent benchmarks, sequential instruction following in the real world poses significant challenges beyond performing a single, clearly defined task. For instance, real-world human instructions can be ambiguous, require different levels of AI assistance, and may evolve over time, reflecting changes in the user's mental state. To address this gap, we introduce RealWebAssist, a novel benchmark designed to evaluate sequential instruction-following in realistic scenarios involving long-horizon interactions with the web, visual GUI grounding, and understanding ambiguous real-world user instructions. RealWebAssist includes a dataset of sequential instructions collected from real-world human users. Each user instructs a web-based assistant to perform a series of tasks on multiple websites. A successful agent must reason about the true intent behind each instruction, keep track of the mental state of the user, understand user-specific routines, and ground the intended tasks to actions on the correct GUI elements. 
Our experimental results show that state-of-the-art models struggle to understand and ground user instructions, posing critical challenges in following real-world user instructions for long-horizon web assistance.", + "bbox": [ + 99, + 294, + 464, + 598 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Introduction", + "text_level": 1, + "bbox": [ + 225, + 616, + 336, + 631 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As an integral part of people's daily life, many of our everyday tasks are performed on the internet. With the tremendous advances in open-ended agents driven by large reasoning models (LRMs) and vision-language models (VLMs), there has been increasing interest in engineering web-based agents that can assist humans with complex tasks on the web following humans' instructions (Zheng et al. 2024a; Nakano et al. 2022). Recent works have demonstrated the promising performance of web-based agents on planning (Putta et al. 2024; Wang et al. 2024; Yao et al. 2023) and Graphical User Interface (GUI) grounding (Cheng et al. 2024; Wu et al. 2024b; Gou et al. 2024; Yang et al. 2024; Xu et al. 2024), across diverse websites, tasks, and GUI interfaces.", + "bbox": [ + 81, + 633, + 478, + 814 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite these encouraging results, there have not been systematic studies on long-horizon web assistance with real-", + "bbox": [ + 83, + 815, + 478, + 843 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "world users. Existing benchmarks (e.g., (Zhou et al. 2023; Deng et al. 2024; Cheng et al. 2024; Yao et al. 2022; Jang et al. 2024)) typically focus on performing a task based on a single instruction. Additionally, the instructions in the current benchmarks were not collected from real users during natural web use sessions, lacking the realism of real user instructions. 
As a result, these benchmarks do not capture the full complexity of real users' web behavior and instructions.", + "bbox": [ + 514, + 273, + 913, + 383 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To bridge this gap, we propose RealWebAssist, the first sequential instruction following benchmark that evaluates long-horizon web assistance with real-world users. As illustrated in Figure 1, to perform a task, a user will instruct an AI assistant in a long sequence. Based on the past instructions and screenshots, the AI assistant must execute one or a few steps of actions to perform the latest instruction. Additionally, a user can engage in repeated interactions over a series of tasks with the assistant in a long session up to 40 minutes. To construct RealWebAssist, we recruited real users to instruct an assistant to perform multiple real-world tasks on the web. We created a large dataset with real user instructions (in both speech and text) for diverse real-world tasks and websites (as shown in Figure 2).", + "bbox": [ + 514, + 385, + 911, + 580 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The sequential instruction following tasks in our RealWebAssist benchmark reflect the natural human behavior on the web. First, real-world users may not initially know what they are looking for. Thus, they need to engage in information seeking on multiple web pages (e.g., step 1-2 in Figure 1), sometimes even across websites. Second, based on new information such as product reviews, users may change their minds (e.g., step 3). Third, users give simple instructions that are seemingly ambiguous out of the context but could be interpreted based on spatial and temporal context via pragmatic reasoning (Goodman and Frank 2016; Fried et al. 2023). For instance, the third instruction in Figure 1 does not explicitly describe which product, but an intelligent assistant should be able to infer the true user intent and correctly select the product in the user's mind. 
Lastly, in our benchmark, users can browse the websites and have the autonomy to make critical decisions (such as purchasing) on their own, which is complementary to existing benchmarks that focus on agents' planning ability to fully complete the tasks without human involvement.", + "bbox": [ + 514, + 580, + 913, + 858 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We systematically evaluate state-of-the-art models, including GUI grounding, VLMs, and large reasoning mod", + "bbox": [ + 516, + 859, + 911, + 888 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10445v2 [cs.AI] 1 Dec 2025", + "bbox": [ + 22, + 285, + 57, + 709 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*These authors contributed equally. Copyright © 2026, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.", + "bbox": [ + 81, + 849, + 478, + 888 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d264e9d9b78f0d24469e8645a6781a8b7b70be1f55ed0a83e526bba6feb6e03f.jpg", + "image_caption": [ + "Figure 1: An example sequential instruction following task with a real-world user. The red circles indicate the correct actions based on the user's spoken instructions. Sequential instructions introduce unique challenges, such as the need to retain and reason over past context. For instance, the instruction in step 3 requires information from step 1 to be correctly interpreted." + ], + "image_footnote": [], + "bbox": [ + 106, + 65, + 859, + 205 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d2e93543deec6e7df545c85e4659b87e11f9d991874be90009730826ed4e310e.jpg", + "image_caption": [ + "Figure 2: Examples of general task categories (left) and websites visited (right) in RealWebAssist. The tasks span a wide range of real-world scenarios, from shopping to food & entertainment to travel planning, which encourages users to visit many different websites." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 272, + 851, + 415 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "els. Experimental results reveal that these models lack several key abilities, including grounding, understanding user intents, reasoning about spatial and temporal context, and adapting to user-specific routines.", + "bbox": [ + 81, + 494, + 480, + 551 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Related Works", + "text_level": 1, + "bbox": [ + 215, + 564, + 346, + 579 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Web Agent Benchmarks. Existing web agent benchmarks primarily evaluate the performance of web agents on tasks with clearly defined, unambiguous instructions, often overlooking the complexities of real-world users' behavior and their instructions to an AI assistant. On WebArena (Zhou et al. 2023), Mind2Web (Deng et al. 2024), and WebShop (Yao et al. 2022), an agent follows a single instruction to perform an isolated task. While they offer an evaluation of an agent's planning capacity, they lack the evaluation of an agent's ability to follow a long sequence of user instructions on long-horizon web tasks. There have also been GUI grounding benchmarks, such as ScreenSpot (Cheng et al. 2024), that focused on grounding simple instructions to clicking actions on webpages. These instructions only instruct web agents to click web elements rather than reaching a user goal (e.g., purchasing an item). WebLINX (Lü, Kasner, and Reddy 2024) features sequential instruction following. However, the instructions were generated by annotators who received detailed guidelines and extensive training, rather than by actual users. The resulting instructions do not capture the nuances and complexity of real-world user instructions that naturally emerge in interactions with an as-", + "bbox": [ + 81, + 583, + 478, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "sistent. 
In contrast, RealWebAssist consists of sequential instruction following tasks for assisting real-world users, providing a novel set of challenges necessary for long-horizon web assistance for real-world users. Table 1 summarizes key differences between RealWebAssist and prior benchmarks.", + "bbox": [ + 514, + 494, + 911, + 565 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Autonomous Web Agents. There have been many recent works on engineering autonomous web agents through retrieval augmented planning (Kim et al. 2024; Zhou et al. 2024; Wu et al. 2024a; He et al. 2024; Pan et al. 2024), finetuning (Hong et al. 2024; Gur et al. 2024; Deng et al. 2024; Pang et al. 2024; Zhang and Zhang 2024), learning workflows (Zhang et al. 2023; Wang et al. 2024; Zheng et al. 2024b; Majumder et al. 2023; Cai et al. 2024), reinforcement learning (Liu et al. 2018; Shi et al. 2017; Nogueira and Cho 2016; Humphreys et al. 2022), and combinations of these methods (Liu et al. 2023; Putta et al. 2024). These works focus on planning for a single task. However, there has not been much work on understanding and following real-world users' sequential instructions on long-horizon tasks.", + "bbox": [ + 514, + 566, + 913, + 762 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "GUI Grounding. One key ability for web agents in many assistance tasks is to ground instructions to clicking actions on a webpage. Recent works have explored VLM finetuning (e.g., (Gou et al. 2024; Wu et al. 2024b; Yang et al. 2024, 2025; Wu et al. 2025; Qin et al. 2025; Xu et al. 2025; Yuan et al. 2025)) as well as prompting pretrained VLMs with segmentations of web elements (e.g., (Yang et al. 2023)) for enabling GUI grounding. 
These methods generate coordinates or bounding boxes on webpages to indicate where to click.", + "bbox": [ + 514, + 763, + 913, + 888 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/aba113943b6a24b652e0b42d4d023fff645dee9bacf04ec2456b975dd33c656f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BenchmarkReal UserSequential InstructionsReal WebsitesGUI GroundingSpeech# Instructions
SreenSpot (Cheng et al. 2024)XXX1200+
WebArena (Zhou et al. 2023)XXXXX812
Mind2Web (Deng et al. 2024)XXXX2000+
WebLINX (Lù, Kasner, and Reddy 2024)XXX512
VideoWebArena (Jang et al. 2024)XXXX2021
WebShop (Yao et al. 2022)XXXXX12087
BearCubs (Song et al. 2025)XXXX111
RealWebAssist (Ours)1885
", + "bbox": [ + 173, + 65, + 823, + 213 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1: Comparison between RealWebAssist and existing web agent benchmarks on several key aspects: (1) whether instructions were given by real-world users instead of annotators, (2) whether there is a sequence of instructions, (3) whether there are real-world websites, (4) whether the agent needs to execute actions by selecting coordinates on webpages, (5) whether the instructions are speech instructions, and (6) the number of total instructions.", + "bbox": [ + 81, + 229, + 911, + 287 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3262cf9379b85a21d2a4c5c9a55cea3280c0ffedb92bc61498969f0ae3157c4a.jpg", + "image_caption": [ + "\"Ok, buy this item\"" + ], + "image_footnote": [], + "bbox": [ + 84, + 333, + 264, + 415 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5e15a46f4526020f333c8caa9d15eeec63cb8f6981f624f2ab01f59fc343101e.jpg", + "image_caption": [ + "\"Let's do All Airports\"", + "Figure 3: Multiple actions can satisfy a user's intent. A web agent's action is considered correct if the coordinate they provide is within one of the annotated correct regions." + ], + "image_footnote": [], + "bbox": [ + 274, + 333, + 452, + 415 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "They have only been trained on low-level instructions that clearly refer to web elements. 
It remains unclear if they can understand real-world user instructions that must be interpreted considering context or may refer to high-level goals.", + "bbox": [ + 81, + 497, + 478, + 555 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RealWebAssist Benchmark", + "text_level": 1, + "bbox": [ + 165, + 569, + 395, + 584 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Problem Setup", + "text_level": 1, + "bbox": [ + 83, + 590, + 202, + 606 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RealWebAssist evaluates agents' ability to follow long-horizon, sequential web instructions to assist users with their high-level goals. In each task, a human user will try to reach an open-ended goal such as \"buy formal outfits for a formal event\" by instructing the assistant through a series of spoken instructions. The dataset is collected from interactions between human users and human assistants in a human experiment. To evaluate agents, we use the human assistants' actions to evaluate the agents' success.", + "bbox": [ + 81, + 609, + 477, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In RealWebAssist, a web agent has access to the current instruction, webpage (as a screenshot), and all the past interactions (previous instructions & screenshots of webpages). Since we are focusing on tasks on real-world websites, it is challenging to ensure safety and reproducibility in an interactive evaluation setting. Therefore, we adopt an offline evaluation setting following prior web-based agent benchmarks with real websites (Deng et al. 2024; Cheng et al. 2024). Specifically, for each instruction collected from the human experiment, the agent needs to identify the correct element to interact with by providing a coordinate or a bound", + "bbox": [ + 81, + 736, + 478, + 888 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ing box to click on the webpage. 
As shown by figure 3, a web agent's action is considered correct if the coordinate or the center of the bounding box they provide falls in the annotated correct regions on the webpage. If there are multiple steps corresponding to one instruction, we evaluate if the web agent's actions for the same instruction are all correct.", + "bbox": [ + 514, + 314, + 911, + 398 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Evaluation Metrics", + "text_level": 1, + "bbox": [ + 517, + 407, + 668, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We consider the following evaluation metrics:", + "bbox": [ + 517, + 425, + 820, + 440 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Task success rate: A task is successful if the web agent can correctly produce actions for all instructions in a task.", + "- Average progress: We measure the progress of a task by the percentage of consecutive instructions the web agent can successfully perform before its first error in the task.", + "- Step success rate: We also consider a teacher forcing setting as a simpler, diagnostic evaluation, where the web agent will only need to follow the instruction at a single step of a task assuming all previous instructions have been successfully performed." + ], + "bbox": [ + 522, + 443, + 911, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Dataset Construction", + "text_level": 1, + "bbox": [ + 516, + 593, + 684, + 607 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Setup. We recruited 10 participants (4 female, 6 male, mean age = 20 years) from a US university campus, none of whom had prior knowledge of the study's purpose, to construct the dataset. All participants were native or fluent English speakers. Each participant completed a 40-minute real-world web assistance session in which they tackled a series of open-ended tasks designed to encourage diverse strategies. 
During each session, participants verbally instructed an experimenter, who operated the computer on their behalf, to complete the tasks. We captured screen recordings and used a high-quality USB microphone to record speech as raw data. The user study was approved by an institutional review board.", + "bbox": [ + 514, + 611, + 911, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "User Tasks. To increase the instruction diversity and realism, participants received general web-based tasks requiring active information seeking, sub-goal planning, and comparison among various options. We generated the task list by few-shot prompting GPT-4o with open-ended tasks, followed by manual filtering and editing to ensure task quality and feasibility. These tasks provide only general guidance,", + "bbox": [ + 514, + 791, + 911, + 888 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ensuring flexibility for personal decision-making. Example tasks include \"Purchase an outfit for a formal event\" and \"Plan a 5-day trip to Japan, booking both flights and hotels\". Each user finishes about 10 tasks.", + "bbox": [ + 83, + 68, + 478, + 125 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Emergent User Behavior. In our realistic, open-ended settings, users exhibit rich behaviors that are not present in previous benchmarks. These include, but are not limited to, information seeking, researching and comparing different options, change of mind, and trial-and-error.", + "bbox": [ + 83, + 125, + 478, + 193 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Annotations. We manually labeled RealWebAssist data to ensure high-quality annotations. We first segmented the full recording into individual clips corresponding to each user's instructions. In our benchmark, we disregard user speech unrelated to explicit instructions for the assistant, such as filler words or verbalized thought processes. 
For each instruction, we provide raw speech, speech transcript, webpage, and the correct regions to click (in the form of one or more bounding boxes). When there were multiple correct answers for the instructions (for instance, \"can you close all the current tabs\"), we annotated all correct regions with multiple bounding boxes. When the experimenter made a mistake during the data collection sessions, we annotated the correct action intended by the user. If an instruction required multiple steps to complete, we set the instruction at each step as the same instruction. To generate the text instructions, we used an off-the-shelf recognition model, Whisper Large-V3 (Radford et al. 2023), to transcribe users' speech and then manually fixed transcription errors. For all the instructions, we have three annotators verifying all of them, ensuring $100\\%$ agreement.", + "bbox": [ + 83, + 193, + 478, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Dataset Statistics. RealWebAssist contains 1,885 user instructions across 107 tasks, 66 websites, and 2,524 screenshots. In addition to the benchmark, we also plan to release the raw data, consisting of over 6 hours of video & audio.", + "bbox": [ + 83, + 484, + 478, + 541 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Key Challenges", + "text_level": 1, + "bbox": [ + 84, + 550, + 207, + 566 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "RealWebAssist features multiple challenges as illustrated in Figure 4, including spatial and temporal reasoning needed to understand ambiguous and context-dependent user instructions, planning for multiple steps of actions to reach the goal communicated by an instruction, and learning about user-specific routines. These key challenges provide a more realistic and holistic evaluation of a web agent's reasoning, planning, and learning abilities to assist real-world users on long-horizon tasks. 
It is worth noting that many of these challenges, in particular, spatial reasoning, temporal reasoning, and routine understanding, are not present in existing web agent benchmarks. Unlike RealWebAssist, prior benchmarks, such as ScreenSpot (Cheng et al. 2024), WebArena (Zhou et al. 2023), and Mind2Web (Deng et al. 2024), only include clear, unambiguous, and non-sequential instructions.", + "bbox": [ + 83, + 569, + 478, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Spatial Reasoning. When referring to one of the elements on a webpage, real-world users tend to use a concise instruction that can be understood conditioned on spatial context instead of an overly elaborated instruction. For instance, when instructing an assistant to buy a product, users may give short instructions such as \"select the cheapest one,\" instead of describing the desired product in detail. Figure 4A depicts different types of spatial reasoning that rely on di", + "bbox": [ + 81, + 777, + 478, + 888 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "verse spatial contexts, including ranking, spatial relations, and overall website functionalities. It is worth noting that these instructions may sometimes reveal users' preferences (e.g., preferred seating), providing additional information for the web agent to provide potentially more customized assistance in the future.", + "bbox": [ + 516, + 69, + 911, + 151 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Temporal Reasoning. In our sequential instruction following tasks, users may instruct an assistant with the history as an assumed temporal context. For example, to understand the intended meaning of \"click the last item,\" the assistant must memorize the items the user has viewed in the past. Figure 4B shows temporal reasoning based on different kinds of temporal context, ranging from short context between two consecutive webpages to long context with the same website to long context across websites. 
From the temporal context, the assistant needs to memorize crucial elements in the previous webpages, infer and track a user's mind (e.g., change of mind about what to buy) based on the past instructions and webpages, and identify the earlier webpage the user refers to. Such temporal reasoning has not been evaluated in prior web agent benchmarks. However, it is very common in our benchmark due to the nature of human web browsing behavior as well as human instructions guided by pragmatics (Goodman and Frank 2016).", + "bbox": [ + 516, + 170, + 911, + 421 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multi-step Planning. Many instructions require multiple steps to complete. In these cases, the assistant needs to interpret the goal implied by the instruction and plan a sequence of actions to achieve that goal. This goes beyond grounding the instruction to a single action on the current webpage. Figure 4C shows an example where the agent was asked to repeat the same order on another food delivery website to check if the price would be different. A successful execution of this instruction would require the agent to first understand what the order is to ground the goal on the current website and generate a successful multi-step plan.", + "bbox": [ + 516, + 439, + 913, + 592 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Routine. Since our benchmark allows a user to engage in repeated interactions with an assistant over multiple tasks, we observe that users may define routines understood by the assistant after repeated interactions. As shown in Figure 4D, the user initially gave detailed step-by-step instructions when selecting arrival and departure dates for a flight. In a subsequent task, however, the user simplified them into a single instruction when selecting dates for a hotel room. Such shorter instructions become possible after establishing a routine in the earlier task. 
Cognitive studies found that procedural abstraction, like these routines, naturally emerges in human cooperative communication through repeated interactions, allowing more efficient communication with partners (McCarthy et al. 2021). The emergence of such routines in our benchmark poses a novel challenge for web agents—learning user-specific procedural abstraction via repeated interactions to achieve human-like adaptive assistance. We hypothesize that this ability could enhance users' perception of the AI assistant, as it understands human cooperative communication.", + "bbox": [ + 516, + 611, + 913, + 888 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A Spatial Reasoning", + "text_level": 1, + "bbox": [ + 99, + 73, + 321, + 95 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ba5e71d4116049f60b5bc9cd16dfe9a18244ef18f0bc79b68f33743507ec46fb.jpg", + "image_caption": [ + "\"Can you click on the seventh tab?\"" + ], + "image_footnote": [], + "bbox": [ + 112, + 138, + 233, + 198 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2eead0b82df55cb899f35efdc9503c8c8ebdfc8a7ecc837bd3f5b544ab79cf9d.jpg", + "image_caption": [ + "Ranking", + "\"And let's just get the lowest price tickets\"" + ], + "image_footnote": [], + "bbox": [ + 259, + 138, + 401, + 198 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7a41bced840724c2c6501da63e165f94557a7e0f02f89d9734f7562716e63e71.jpg", + "image_caption": [ + "Spatial relations", + "\"Can you click the arrow between the two\"" + ], + "image_footnote": [], + "bbox": [ + 431, + 138, + 563, + 196 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d855e2cf8493411bd282771d1d34a5bab6d962d3dd3397a281310a818ed5544c.jpg", + "image_caption": [ + "Only select the two seats on the top" + ], + "image_footnote": [], + "bbox": [ + 589, + 138, + 722, + 196 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": 
"images/23a81543b1cddff082f6d87c6f256bc893c2a538cd4253b22058339db2d13e45.jpg", + "image_caption": [ + "Website functions", + "\"Change the end date from 20 to 22nd\"" + ], + "image_footnote": [], + "bbox": [ + 754, + 137, + 880, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "B Temporal Reasoning", + "text_level": 1, + "bbox": [ + 99, + 202, + 346, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/63902a9f6317abf275642953e8b40809c46f12a3b365a3c43d271fe3a613ffd9.jpg", + "image_caption": [ + "Previous webpage", + "\"Goto the previous tab\"" + ], + "image_footnote": [], + "bbox": [ + 107, + 257, + 233, + 319 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/cb5d8e54ab382218c82963c419815a5545b40036eb8a04989b805fb5a8943dca.jpg", + "image_caption": [ + "\"No, stay on that page\"" + ], + "image_footnote": [], + "bbox": [ + 259, + 257, + 393, + 316 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9a13fc77395a1b436583c1dc8a2907791f0c87b3cee6c2fd0a7775d0ee8bcce6.jpg", + "image_caption": [ + "Long context within the same website", + "\"Click on HP laptop\"", + "Long context across multiple websites" + ], + "image_footnote": [], + "bbox": [ + 424, + 256, + 555, + 319 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/3a00d16673abd9bdbb7dc446ffd3e9a5cef52dfef2decba6e9b846a3e3006f18.jpg", + "image_caption": [ + "\"Can you check ASUS?\"" + ], + "image_footnote": [], + "bbox": [ + 596, + 256, + 722, + 318 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8f9ec67c1254812380c80b3c73fa1c66e3a246c33c0fee2617f7b7621f8fb749.jpg", + "image_caption": [ + "\"Go back to the other laptop\"" + ], + "image_footnote": [], + "bbox": [ + 769, + 256, + 885, + 318 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/62cf7409464e0df9224fbc514bcfada2521f09cde0a5801f98f118c975b7f24f.jpg", + "image_caption": [ + "\"Can you look at the next tab as well?\"" + 
], + "image_footnote": [], + "bbox": [ + 104, + 361, + 233, + 421 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e47eaaa5ae2da59bed97e795c4703cf3c7e3ddd2ecd60129652f86f4bc7e9674.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 422, + 179, + 438 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/304fb0a3a410fe3d67552f171bcbdaf6ecdaff35d16ea1f23c4ff48f91fa6670.jpg", + "image_caption": [ + "\"Oh, this is like 95 bucks. Can you press the other tab\"", + "CityPASS" + ], + "image_footnote": [], + "bbox": [ + 233, + 361, + 431, + 421 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/201d9e2f83e91b7ffbe99cf13a4b2f44ec488e6b23d6d455f891891130e95907.jpg", + "image_caption": [ + "\"OK, can you open a new tab and search for ...\"" + ], + "image_footnote": [], + "bbox": [ + 429, + 361, + 558, + 421 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8546b193f6d0948328afa144cb4d9752a6d699f8d9b30d3fcd79c63fc862dbcf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 485, + 422, + 506, + 439 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1eed3cbba66d7499656e171b12493d30247b2ce027294fbe4eca315736ec9e3a.jpg", + "image_caption": [ + "\"This is 36. 
Can you go back to CN Tower's official website\"" + ], + "image_footnote": [], + "bbox": [ + 558, + 361, + 723, + 421 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/01d4f6a679a498b0e3393f2d373eb46be9fc4e2c1ef4a99929fdd0f7130bbbfd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 647, + 422, + 674, + 441 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/723238ade76f571ce35b12a7b4568326c588d00e3cdcc48105287acc73f8c99e.jpg", + "image_caption": [ + "\"I'd probably get the city pass option\"" + ], + "image_footnote": [], + "bbox": [ + 764, + 361, + 885, + 421 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1adfabb0cb91990ce5bddb0cf1cc7badf04e77434e247af1898187b45eb832b4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 816, + 422, + 839, + 440 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "C Multi-step planning", + "text_level": 1, + "bbox": [ + 99, + 448, + 343, + 468 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/866645a1921b87066ab8f5f51fe034ee26f062d6eabfb1a6d939229e7b2578e6.jpg", + "image_caption": [ + "\"Can you go to DoorDash and order the same thing to compare the price?\"", + "History (not shown here): The user previously ordered Snooze melt from Meltdown and selected French Fries" + ], + "image_footnote": [], + "bbox": [ + 215, + 489, + 346, + 541 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4f1154756e2c1ea4c745646e35c604eedd93c1b1fcccdb866bdd547aa94e8897.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 493, + 488, + 542 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c76f097e0d7a8be85904af9e0ba26cc8688b241d6345fd0e81205598c4cfec7a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 531, + 489, + 629, + 542 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": 
"images/0bda5b47032a5ddbecdab3f3d2d1a16c54e28d78bceb97b1b655b1b06d0f4b3a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 489, + 754, + 541 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b44e9d6483d35381371556f2f7874777315f6c1b282d7f8ece0c0bb172957c07.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 489, + 883, + 542 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "D Routine", + "text_level": 1, + "bbox": [ + 101, + 551, + 215, + 568 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/13d42bd21ef1b84253b992a925229d3a9601fb7eb54415e8caeb8b799ee8b7b6.jpg", + "image_caption": [ + "\"Can we go to the dates?\"", + "\"And for dates do 3.17 to 3.21\"", + "Earlier task: select dates for a round-trip flight", + "Later task: select dates for a hotel stay" + ], + "image_footnote": [], + "bbox": [ + 220, + 564, + 339, + 623 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b930b1c1eb8244d7e57f1f9e3f48151543e6440e4c9c22d1b7738667ce567590.jpg", + "image_caption": [ + "\"Can we select April 7th?\"" + ], + "image_footnote": [], + "bbox": [ + 398, + 564, + 509, + 622 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8022060c99b56e9562de56293f5888d82c66f54c2b561b33fb7e638919355843.jpg", + "image_caption": [ + "\"And then April 14th\"" + ], + "image_footnote": [], + "bbox": [ + 586, + 563, + 699, + 622 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9f7d7479bdb7dfe1cbc2f6af2434f79d35b23ee13e316a307b48b24a38f0979c.jpg", + "image_caption": [ + "\"And hit done\"" + ], + "image_footnote": [], + "bbox": [ + 771, + 561, + 890, + 622 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/3582dddae1e60403cc5a6b850ad94c9bf2fadd65918f9cdaa0af5f00f8e90161.jpg", + "image_caption": [ + "Figure 4: Key challenges introduced by RealWebAssist: (A) spatial reasoning, (B) temporal reasoning, (C) multi-step planning, and 
(D) learning user-specific routines." + ], + "image_footnote": [], + "bbox": [ + 258, + 635, + 375, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "\"And for dates do 3.17 to 3.21\"", + "bbox": [ + 254, + 625, + 390, + 633 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0ecfc95aeb49af0705291a27e30780180994aefb82a293996b90508495262b86.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 633, + 622, + 690 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9c19821c5df1f6bd8f461904cde24337092636ed53ed5bafbef20c8cbd7b1197.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 748, + 633, + 870, + 690 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Experiments", + "text_level": 1, + "bbox": [ + 225, + 763, + 336, + 780 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines", + "text_level": 1, + "bbox": [ + 83, + 784, + 158, + 797 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluated several types of models for web agents commonly evaluated in existing web agent benchmarks that have real-world websites (i.e., offline evaluation). For all the experiments, we use the ground-truth captions for instructions.", + "bbox": [ + 81, + 804, + 478, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "GUI Grounding Models. GUI grounding models directly translate an instruction to an action on a webpage. There are", + "bbox": [ + 83, + 861, + 478, + 888 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "two general types of grounding models. First, Set-of-Mark (SoM) (Yang et al. 2023) segments salient elements on a webpage using an off-the-shelf segmentation model (e.g., SAM (Kirillov et al. 2023) and Semantic-SAM (Li et al. 2023)) and prompts a VLM to select a segment mask to identify the clicking area corresponding to the given instruction. 
Second, VLMs finetuned on datasets with paired instructions and annotated clicking coordinates or bounding", + "bbox": [ + 514, + 763, + 913, + 876 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "boxes. We evaluated UGround-V1 (Gou et al. 2024), OSAtlas (Wu et al. 2024b), Aria-UI (Yang et al. 2024), GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2024a), and UI-TARS (Qin et al. 2025).", + "bbox": [ + 83, + 68, + 478, + 125 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "VLM/LRM + Grounding. Grounding models are designed or trained to ground a simple instruction to a webpage and thus tend to lack reasoning or planning capabilities. To address this, we leveraged VLMs and LRMs to first translate real user instructions to more understandable ones for grounding models. In particular, a VLM or an LRM needs to reason about the true user intent implied by the instruction and the spatial & temporal context. For instructions that require multiple actions, it needs to generate a plan to complete the instructions. Finally, it needs to generate a straightforward, clear instruction for the grounding model to produce the final action at each step. We evaluated state-of-the-art VLMs (OpenAI 2023; Team 2025; Qwen et al. 2025), as well as state-of-the-art LRMs (Jaech et al. 2024; Team 2025; Anthropic 2025). In the main results, we paired each VLM and LRM with the grounding model that achieved the highest step accuracy (GTA-1). For all VLMs and LRMs, we provide the past 10 steps for context, which we found to be a reasonable fixed context length in our preliminary study, balancing cost and informativeness. We also found that prompting models with screenshots of past webpages could incur a high cost. Therefore, we only prompt the models with the screenshot of the current webpage. For the history, we prompted GPT-4o to generate text-based action history based on consecutive screenshots and the instructions at each step. 
We then used this text-based history description for the evaluated VLMs and LRMs.", + "bbox": [ + 83, + 125, + 478, + 496 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Finetuning. To evaluate whether models can learn to better follow real-world user instructions with additional training, we finetuned the best-performing grounding model (GTA-1) following the model's original group relative policy optimization (GRPO) training procedure (Yang et al. 2025) on 9 participants' data and tested it on the held-out participants' instructions. Specifically, we trained the grounding model to produce an action based on the past 10 steps of actions (in text), the current webpage screenshot, and the instruction. We enumerated different train/test splits and reported the averaged performance, either using the finetuned model alone or pairing it with the best VLM or LRM.", + "bbox": [ + 81, + 498, + 478, + 664 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results", + "text_level": 1, + "bbox": [ + 84, + 676, + 143, + 689 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Main results are summarized in Table 3. All models fell short in following real user instructions. The highest task success rate was only $14.0\\%$ and the highest average progress was only $28.7\\%$, a large gap compared to humans ($93.4\\%$ task success rate). This difference has a $95\\%$ confidence interval of [71.3, 87.5], and is highly significant with p-value $< 0.0001$ . Grounding methods by themselves failed to finish most tasks. However, when paired with the best-performing grounding model (GTA-1), instructions generated by VLMs & LRMs significantly improved the performance. LRMs performed marginally better than most VLMs. Across all three metrics, Gemini 2.5 Flash, Gemini 2.5 Pro, and o3 showed the strongest performance. 
Finetuning GTA-1 on real user data marginally improved its perfor", + "bbox": [ + 81, + 694, + 480, + 888 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "mance, but finetuning offered no benefit when GTA-1 was paired with VLMs and LRMs, since the finetuned model is trained to adapt to real users' instructions instead of instructions generated by VLM or LRM.", + "bbox": [ + 516, + 69, + 911, + 125 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Discussion", + "text_level": 1, + "bbox": [ + 668, + 137, + 761, + 151 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Can grounding models understand real-world user instructions? There remains a significant gap in the performance of current direct grounding methods. The best grounding model, GUI-Actor, has a task success rate of only $5.7\\%$ . Figure 5 illustrates various failure cases encountered when directly using GTA-1. Unsurprisingly, grounding models fail to interpret instructions requiring reasoning due to their limited reasoning capabilities. However, even for context-free instructions involving straightforward spatial reasoning—tasks where grounding methods should excel—they frequently misinterpret spatial layouts or rankings. For instance, they often incorrectly select elements for instructions such as \"click the first one.\"", + "bbox": [ + 516, + 154, + 911, + 333 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "How can VLMs & LRMs help? VLMs or LRMs can convert the original user instructions into more direct and explicit descriptions that a grounding model can more easily understand. This is made possible by their reasoning capacities. For instance, in Figure 5A, the grounding model (GTA-1) on its own fails to select the first tab: it selects the first element instead of the first tab. However, it succeeds after o3 rewrites the instruction to refer to the title. 
As shown in Figure 5B, grounding models may sometimes still fail due to inherent limitations even when VLMs/LRMs generate clearer instructions. Nonetheless, incorporating VLMs or LRMs significantly improves overall performance.", + "bbox": [ + 514, + 334, + 911, + 500 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "What are the limitations of VLMs & LRMs? While VLMs and LRMs help, the highest task success rate is still only $14.0\\%$ . Beyond errors from grounding models (e.g., Figure 5B), they continue to struggle with complex temporal reasoning. In Figure 5C, the user previously asked to open the first two search results in new tabs. When later instructed to \"look at the first one we just opened,\" o3 failed to identify which element \"the first one\" referred to—instead of the first newly opened tab, it pointed to the first search result. We further analyze the error distribution between reasoning errors (the VLM/LRM mistranslates the instruction and refers to the wrong element) and grounding errors (the rewritten instruction is correct, but the grounding model still fails to click the right element). For the best model (o3 + GTA-1), $43.3\\%$ of errors are grounding errors and $56.7\\%$ are reasoning errors. This suggests that current VLMs and LRMs still lack the reasoning and planning abilities needed to robustly perform sequential instruction-following tasks.", + "bbox": [ + 514, + 501, + 913, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Does learning from real-world user data help? Finetuning GTA-1 marginally improved average progress and step accuracy but yielded no additional benefit when paired with VLMs and LRMs. These results show that the finetuned model better understands real user instructions, yet it still fails to generalize to instructions generated by VLMs and LRMs. 
The experiments suggest that finetuning grounding models on a small set of real user instructions provides minimal benefit, and collecting large-scale real user instructions remains a significant challenge.", + "bbox": [ + 514, + 750, + 913, + 888 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7e15864243a34993ec1a5ccc34c216e8c768944297d0dc2929d319e413d09841.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryModelTask SuccessProgressStep Accuracy
HumanHuman Operator93.496.499.2
GroundingSet-of-Mark0.02.729.8
OS-Atlas0.03.826.6
Aria-UI0.02.432.8
UGround-V10.06.247.7
UI-TARS2.813.153.8
GTA-13.717.761.5
GUI-Actor5.714.761.4
VLM + GroundingGPT-4o + GTA-18.423.572.7
Qwen 2.5 72B + GTA-19.324.369.0
Gemini 2.5 Flash + GTA-111.226.975.4
LRM + Groundingo1 + GTA-17.517.768.2
Gemini 2.5 Pro + GTA-18.423.574.5
o4-mini + GTA-110.321.767.1
Claude 3.7 Sonnet + GTA-112.126.768.8
o3 + GTA-114.028.776.7
FinetunedGTA-1-F3.7 (+0.0)19.7 (+2.0)64.3 (+2.8)
Gemini 2.5 Flash + GTA-1-F11.2 (+0.0)26.9 (+0.0)75.4 (+0.0)
o3 + GTA-1-F14.0 (+0.0)28.7 (+0.0)76.7 (+0.0)
", + "bbox": [ + 147, + 64, + 849, + 364 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Model Performance including task success rate, average progress, and step accuracy. All results are in %. The best performance of pretrained models and finetuned models is highlighted in bold. GTA-1-F indicates the finetuned GTA-1. Plus sign indicates the improvement compared to using the raw model for the same set of instructions.", + "bbox": [ + 81, + 375, + 915, + 419 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7efbfd1a507fb1217230f8a285f7a1064e7d85faba0ce51e3b238de71309c6dd.jpg", + "image_caption": [ + "Figure 5: Qualitative results. The captions show instructions generated by o3 (the best LRM). (A) Error corrected by using o3 to convert instructions. (B) Failure caused by GTA-1 when o3 reasons correctly. (C) Reasoning failure caused by o3." + ], + "image_footnote": [], + "bbox": [ + 89, + 452, + 323, + 681 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1560028b005585f00b7331b8ab6dda9bf201b15c7b0fadbaf391295da2699617.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 326, + 452, + 545, + 680 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4d4d39ce25fbccf3bc1671ae26309bdc0979314bc11fc2de8468d2c3799b9ef5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 545, + 452, + 890, + 680 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Limitations. RealWebAssist represents an important first step towards evaluating web agents on long-horizon, real-user tasks. However, it has several limitations. The first is participant scale and diversity. Collecting real-user data is expensive and time-consuming. The number of participants is comparable to prior works that use expert annotators (Lu, Kasner, and Reddy 2024). However, we intend to increase user diversity in future versions of the benchmark. 
We will also open-source our data collection tools for community expansion of the dataset. Second, like prior benchmarks on", + "bbox": [ + 81, + 744, + 480, + 885 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "real-world websites (Deng et al. 2024; Cheng et al. 2024), we constrain our evaluation to an offline setting to ensure reproducibility and safety. This is complementary to benchmarks that focus on interactive evaluation in sandbox environments (e.g., WebArena). We believe that web agents should be evaluated on both types of benchmarks to fully assess their capabilities. Lastly, the current setting does not allow dialogue between a user and the AI assistant, which we will explore in future work.", + "bbox": [ + 514, + 744, + 913, + 869 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Conclusion", + "text_level": 1, + "bbox": [ + 232, + 66, + 330, + 82 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we present RealWebAssist, the first benchmark for evaluating web agents' ability to provide long-horizon web assistance with real-world users via sequential instruction-following. Our benchmark poses novel challenges, including spatial and temporal reasoning, planning, and adapting to user-specific routines. We conducted a comprehensive evaluation and analysis on multiple state-of-the-art GUI grounding models, VLMs, and LRMs, revealing critical limitations of them. We have also shown the limited benefit of finetuning models on real user data. Our benchmark, along with the well-annotated user instruction dataset, provides resources and diagnostic tools for further research on real-world web assistance. 
In future work, we plan to expand our human study to include more participants from various backgrounds, examine web assistance in interactive settings, and incorporate chat between users and web agents.", + "bbox": [ + 81, + 85, + 480, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 197, + 321, + 364, + 338 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by a research grant from Amazon. We thank Janice Chen for helpful discussions.", + "bbox": [ + 83, + 340, + 478, + 369 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 233, + 383, + 328, + 397 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Anthropic. 2025. Claude 3.7 Sonnet and Claude Code. https://www.anthropic.com/news/claude-3-7-sonnet. Accessed: 2025-03-17.", + "Cai, T.; Wang, X.; Ma, T.; Chen, X.; and Zhou, D. 2024. Large Language Models as Tool Makers. arXiv:2305.17126.", + "Cheng, K.; Sun, Q.; Chu, Y.; Xu, F.; Li, Y.; Zhang, J.; and Wu, Z. 2024. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935.", + "Deng, X.; Gu, Y.; Zheng, B.; Chen, S.; Stevens, S.; Wang, B.; Sun, H.; and Su, Y. 2024. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36.", + "Fried, D.; Tomlin, N.; Hu, J.; Patel, R.; and Nematzadeh, A. 2023. Pragmatics in Language Grounding: Phenomena, Tasks, and Modeling Approaches. arXiv:2211.08371.", + "Goodman, N. D.; and Frank, M. C. 2016. Pragmatic language interpretation as probabilistic inference. Trends in cognitive sciences, 20(11): 818-829.", + "Gou, B.; Wang, R.; Zheng, B.; Xie, Y.; Chang, C.; Shu, Y.; Sun, H.; and Su, Y. 2024. Navigating the digital world as humans do: Universal visual grounding for gui agents. 
arXiv preprint arXiv:2410.05243.", + "Gur, I.; Furuta, H.; Huang, A.; Safdari, M.; Matsuo, Y.; Eck, D.; and Faust, A. 2024. A Real-World WebAgent with Planning, Long Context Understanding, and Program Synthesis. arXiv:2307.12856.", + "He, H.; Yao, W.; Ma, K.; Yu, W.; Dai, Y.; Zhang, H.; Lan, Z.; and Yu, D. 2024. WebVoyager: Building an End-to-End Web Agent with Large Multimodal Models. arXiv:2401.13919.", + "Hong, W.; Wang, W.; Lv, Q.; Xu, J.; Yu, W.; Ji, J.; Wang, Y.; Wang, Z.; Zhang, Y.; Li, J.; Xu, B.; Dong, Y.; Ding, M.; and Tang, J. 2024. CogAgent: A Visual Language Model for GUI Agents. arXiv:2312.08914." + ], + "bbox": [ + 84, + 402, + 478, + 888 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Humphreys, P. C.; Raposo, D.; Pohlen, T.; Thornton, G.; Chhaparia, R.; Muldal, A.; Abramson, J.; Georgiev, P.; Santoro, A.; and Lillicrap, T. 2022. A data-driven approach for learning to control computers. In International Conference on Machine Learning, 9466-9482. PMLR.", + "Jaech, A.; Kalai, A.; Lerer, A.; Richardson, A.; El-Kishky, A.; Low, A.; Helyar, A.; Madry, A.; Beutel, A.; Carney, A.; et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720.", + "Jang, L.; Li, Y.; Zhao, D.; Ding, C.; Lin, J.; Liang, P. P.; Bonatti, R.; and Koishida, K. 2024. Videowebarena: Evaluating long context multimodal agents with video understanding web tasks. arXiv preprint arXiv:2410.19100.", + "Kim, M.; Bursztyn, V.; Koh, E.; Guo, S.; and Hwang, S.-w. 2024. Rada: Retrieval-augmented web agent planning with llms. In Findings of the Association for Computational Linguistics ACL 2024, 13511-13525.", + "Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; Dollar, P.; and Girshick, R. 2023. Segment Anything. arXiv:2304.02643.", + "Li, F.; Zhang, H.; Sun, P.; Zou, X.; Liu, S.; Yang, J.; Li, C.; Zhang, L.; and Gao, J. 2023. 
Semantic-SAM: Segment and Recognize Anything at Any Granularity. arXiv preprint arXiv:2307.04767.", + "Liu, E. Z.; Guu, K.; Pasupat, P.; Shi, T.; and Liang, P. 2018. Reinforcement learning on web interfaces using workflow-guided exploration. arXiv preprint arXiv:1802.08802.", + "Liu, Z.; Yao, W.; Zhang, J.; Xue, L.; Heinecke, S.; Murthy, R.; Feng, Y.; Chen, Z.; Niebles, J. C.; Arpit, D.; et al. 2023. Bolaa: Benchmarking and orchestrating llm-augmented autonomous agents. arXiv preprint arXiv:2308.05960.", + "Lü, X. H.; Kasner, Z.; and Reddy, S. 2024. Weblinx: Realworld website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930.", + "Majumder, B. P.; Mishra, B. D.; Jansen, P.; Tafjord, O.; Tandon, N.; Zhang, L.; Callison-Burch, C.; and Clark, P. 2023. CLIN: A Continually Learning Language Agent for Rapid Task Adaptation and Generalization. arXiv:2310.10134.", + "McCarthy, W. P.; Hawkins, R. D.; Wang, H.; Holdaway, C.; and Fan, J. E. 2021. Learning to communicate about shared procedural abstractions. arXiv preprint arXiv:2107.00077.", + "Nakano, R.; Hilton, J.; Balaji, S.; Wu, J.; Ouyang, L.; Kim, C.; Hesse, C.; Jain, S.; Kosaraju, V.; Saunders, W.; Jiang, X.; Cobbe, K.; Eloundou, T.; Krueger, G.; Button, K.; Knight, M.; Chess, B.; and Schulman, J. 2022. WebGPT: Browser-assisted question-answering with human feedback. arXiv:2112.09332.", + "Nogueira, R.; and Cho, K. 2016. End-to-end goal-driven web navigation. Advances in neural information processing systems, 29.", + "OpenAI. 2023. GPT-4 Technical Report. ArXiv, abs/2303.08774.", + "Pan, J.; Zhang, Y.; Tomlin, N.; Zhou, Y.; Levine, S.; and Suhr, A. 2024. Autonomous Evaluation and Refinement of Digital Agents. arXiv:2404.06474." + ], + "bbox": [ + 517, + 69, + 913, + 888 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pang, R. Y.; Yuan, W.; Cho, K.; He, H.; Sukhbaatar, S.; and Weston, J. 2024. Iterative Reasoning Preference Optimization. 
arXiv:2404.19733.", + "Putta, P.; Mills, E.; Garg, N.; Motwani, S.; Finn, C.; Garg, D.; and Rafailov, R. 2024. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199.", + "Qin, Y.; Ye, Y.; Fang, J.; Wang, H.; Liang, S.; Tian, S.; Zhang, J.; Li, J.; Li, Y.; Huang, S.; et al. 2025. UI-TARS: Pioneering Automated GUI Interaction with Native Agents. arXiv preprint arXiv:2501.12326.", + "Qwen;.; Yang, A.; Yang, B.; Zhang, B.; Hui, B.; Zheng, B.; Yu, B.; Li, C.; Liu, D.; Huang, F.; Wei, H.; Lin, H.; Yang, J.; Tu, J.; Zhang, J.; Yang, J.; Yang, J.; Zhou, J.; Lin, J.; Dang, K.; Lu, K.; Bao, K.; Yang, K.; Yu, L.; Li, M.; Xue, M.; Zhang, P.; Zhu, Q.; Men, R.; Lin, R.; Li, T.; Tang, T.; Xia, T.; Ren, X.; Ren, X.; Fan, Y.; Su, Y.; Zhang, Y.; Wan, Y.; Liu, Y.; Cui, Z.; Zhang, Z.; and Qiu, Z. 2025. Qwen2.5 Technical Report. arXiv:2412.15115.", + "Radford, A.; Kim, J. W.; Xu, T.; Brockman, G.; McLeavey, C.; and Sutskever, I. 2023. Robust speech recognition via large-scale weak supervision. In International conference on machine learning, 28492-28518. PMLR.", + "Reddy, C. K.; Beyrami, E.; Pool, J.; Cutler, R.; Srinivasan, S.; and Gehrke, J. 2019. A scalable noisy speech dataset and online subjective test framework. arXiv preprint arXiv:1909.08050.", + "Shi, T.; Karpathy, A.; Fan, L.; Hernandez, J.; and Liang, P. 2017. World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, 3135-3144. PMLR.", + "Song, Y.; Thai, K.; Pham, C. M.; Chang, Y.; Nadaf, M.; and Iyyer, M. 2025. Bearcubs: A benchmark for computer-using web agents. arXiv preprint arXiv:2503.07919.", + "Team. 2025. Gemini 2.5: Pushing the Frontier with Advanced Reasoning, Multimodality, Long Context, and Next Generation Agentic Capabilities. arXiv:2507.06261.", + "Wang, Z. Z.; Mao, J.; Fried, D.; and Neubig, G. 2024. Agent workflow memory. 
arXiv preprint arXiv:2409.07429.", + "Wu, Q.; Cheng, K.; Yang, R.; Zhang, C.; Yang, J.; Jiang, H.; Mu, J.; Peng, B.; Qiao, B.; Tan, R.; et al. 2025. GUI-Actor: Coordinate-Free Visual Grounding for GUI Agents. arXiv preprint arXiv:2506.03143.", + "Wu, Z.; Han, C.; Ding, Z.; Weng, Z.; Liu, Z.; Yao, S.; Yu, T.; and Kong, L. 2024a. OS-Copilot: Towards Generalist Computer Agents with Self-Improvement. arXiv:2402.07456.", + "Wu, Z.; Wu, Z.; Xu, F.; Wang, Y.; Sun, Q.; Jia, C.; Cheng, K.; Ding, Z.; Chen, L.; Liang, P. P.; et al. 2024b. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218.", + "Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2024. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454." + ], + "bbox": [ + 83, + 68, + 480, + 888 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2025. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454.", + "Yang, J.; Zhang, H.; Li, F.; Zou, X.; Li, C.; and Gao, J. 2023. Set-of-Mark Prompting Unleashes Extraordinary Visual Grounding in GPT-4V. arXiv preprint arXiv:2310.11441.", + "Yang, Y.; Li, D.; Dai, Y.; Yang, Y.; Luo, Z.; Zhao, Z.; Hu, Z.; Huang, J.; Saha, A.; Chen, Z.; et al. 2025. GTA1: GUI Test-time Scaling Agent. arXiv preprint arXiv:2507.05791.", + "Yang, Y.; Wang, Y.; Li, D.; Luo, Z.; Chen, B.; Huang, C.; and Li, J. 2024. Aria-UI: Visual Grounding for GUI Instructions. arXiv preprint arXiv:2412.16256.", + "Yao, S.; Chen, H.; Yang, J.; and Narasimhan, K. 2022. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35: 20744-20757.", + "Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2023. 
ReAct: Synergizing Reasoning and Acting in Language Models. arXiv:2210.03629.", + "Ying, L.; Liu, J. X.; Aanya, S.; Fang, Y.; Tellex, S.; Tenenbaum, J. B.; and Shu, T. 2024. SIFToM: Robust Spoken Instruction Following through Theory of Mind. arXiv:2409.10849.", + "Yuan, X.; Zhang, J.; Li, K.; Cai, Z.; Yao, L.; Chen, J.; Wang, E.; Hou, Q.; Chen, J.; Jiang, P.-T.; and Li, B. 2025. Enhancing Visual Grounding for GUI Agents via Self-Evolutionary Reinforcement Learning. arXiv:2505.12370.", + "Zhang, C.; Yang, Z.; Liu, J.; Han, Y.; Chen, X.; Huang, Z.; Fu, B.; and Yu, G. 2023. AppAgent: Multimodal Agents as Smartphone Users. arXiv:2312.13771.", + "Zhang, Z.; and Zhang, A. 2024. You Only Look at Screens: Multimodal Chain-of-Action Agents. arXiv:2309.11436.", + "Zheng, B.; Gou, B.; Kil, J.; Sun, H.; and Su, Y. 2024a. Gpt-4v (ision) is a generalist web agent, if grounded. arXiv preprint arXiv:2401.01614.", + "Zheng, L.; Wang, R.; Wang, X.; and An, B. 2024b. Synapse: Trajectory-as-Exemplar Prompting with Memory for Computer Control. arXiv:2306.07863.", + "Zhou, A.; Yan, K.; Shlapentokh-Rothman, M.; Wang, H.; and Wang, Y.-X. 2024. Language Agent Tree Search Unifies Reasoning Acting and Planning in Language Models. arXiv:2310.04406.", + "Zhou, S.; Xu, F. F.; Zhu, H.; Zhou, X.; Lo, R.; Sridhar, A.; Cheng, X.; Ou, T.; Bisk, Y.; Fried, D.; et al. 2023. Webarena: A realistic web environment for building autonomous agents. arXiv preprint arXiv:2307.13854." 
+ ], + "bbox": [ + 517, + 68, + 913, + 761 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 439, + 82, + 560, + 108 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "More experiment results", + "text_level": 1, + "bbox": [ + 176, + 188, + 387, + 205 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Full VLM & LRM + Grounding results", + "text_level": 1, + "bbox": [ + 84, + 208, + 388, + 223 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "For the best three grounding models, GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2025) and UI-TARS (Qin et al. 2025), we test their pairing with all the VLMs and LRMs. Table 3 shows the full results. All the evaluation experiments are run on a single A100 GPU for 20 - 40 minutes. Finetuning GTA-1 model takes 4 hours on 4 A100 GPUs.", + "bbox": [ + 84, + 224, + 478, + 309 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/95dcf2badd6229a6cb610ae8fd4f5db862d76b97acd78d06b8096a64989af287.jpg", + "image_caption": [ + "Experiment with different context lengths", + "Figure 6: Effect of context length on Gemini 2.5 Flash + GTA-1." + ], + "image_footnote": [], + "bbox": [ + 86, + 354, + 473, + 580 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We evaluated the best-performing VLM (Gemini 2.5 Flash) + GTA-1 with varying history context lengths, from no history to 20 steps. An ideal assistant should be able to leverage different kinds of historical context based on different instructions, ranging from no history to multi-task history context (e.g., for routine learning). As shown in Figure 6, increasing context length also does not necessarily lead to better performance. Gemini 2.5 Flash + GTA-1 achieved the highest task success rate with a context length of 10, and increasing the context length further led to poorer performance. 
This suggest the limitation of VLM in effectively utilizing historical context for reasoning.", + "bbox": [ + 86, + 637, + 478, + 804 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Effect of Speech Recognition Errors", + "text_level": 1, + "bbox": [ + 84, + 815, + 362, + 830 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "All baseline experiments use the ground truth transcripts of user speech instructions as input to ensure that performance is not affected by errors in speech-to-text transcription. However, in real-world settings, instructions are of-", + "bbox": [ + 84, + 833, + 478, + 890 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ten given via speech. To reflect this, we evaluated the effect of speech recognition on the agent's performance by using the transcripts generated from a state-of-the-art automatic speech recognition (ASR) model, Whisper LargeV3 (Radford et al. 2023). Additionally, since users may not always be in quiet, controlled environments using a high-quality microphone like in our user experiment setup, we simulated noisy environments by injecting background noise with noise files from the Microsoft Scalable Noisy Speech Dataset (MS-SNSD) dataset (Reddy et al. 2019), following (Ying et al. 2024). The noise files include people talking in the background and keyboard typing sounds. As shown in Table 4, using speech recognition resulted in a $1.9\\%$ drop in task success rate, and having noisy speech resulted in a further $1.9\\%$ drop. In contrast, the word error rate (WER) of the ASR results increased from $1.4\\%$ (original speech) to $28.1\\%$ (noisy speech), a much larger performance drop compared to the final task performance. 
This result suggests that reasoning the true meanings of speech instructions by leveraging context can help mitigate errors from ASR.", + "bbox": [ + 517, + 189, + 911, + 467 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Dataset Construction Details", + "text_level": 1, + "bbox": [ + 594, + 483, + 836, + 500 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Video Segmenting. As shown in the video example, the interactive sessions are highly dynamic, and spoken instructions do not always align cleanly with specific screens or timesteps. Automatically segmenting instructions and matching them to corresponding webpages and actions using heuristics would risk significantly degrading data quality. Therefore, we manually segment the live sessions using video editing software to construct the final RealWebAssist dataset. All participants provided consent to have their speech recorded and included in this dataset.", + "bbox": [ + 517, + 506, + 911, + 645 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Bounding Box Labeling. As shown in Figure 7, certain instructions like \"close all the tabs\" may correspond to multiple valid actions, since closing any of the tabs first would be reasonable. Therefore, we add bounding boxes to all of the elements that would be correct. The bounding boxes are drawn manually using a Python tool built with tkinter, and the clickable regions are determined by a visual inspection of the webpage.", + "bbox": [ + 517, + 646, + 911, + 757 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "More Dataset Details", + "text_level": 1, + "bbox": [ + 625, + 773, + 803, + 790 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Evaluation detail", + "text_level": 1, + "bbox": [ + 519, + 797, + 653, + 811 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "User instructions in RealWebAssist require different operations on the webpage, including clicking, scrolling and typing. 
We believe that action types other than clicking is trivial (for typing actions, the benchmark includes the step of finding the correct place to type instead of the actual typing", + "bbox": [ + 517, + 819, + 913, + 890 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/e28c5b91165be0534e0d20d81aba0b282b8600cebcd81bf68b8bff288a0a0eca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
VLM + GTA-1GPT-4o + GTA-18.423.572.7
Qwen 2.5 72B + GTA-19.324.369.0
Gemini 2.5 Flash + GTA-111.226.975.4
LRM + GTA-1Claude 3.7 Sonnet + GTA-112.126.768.8
Gemini 2.5 Pro + GTA-18.423.574.5
o1 + GTA-17.521.173.1
o3 + GTA-114.028.776.7
o4-mini + GTA-110.321.767.1
VLM + GUI-ACTORGPT-4o + GUI-Actor6.518.067.0
Qwen 2.5 72B + GUI-Actor9.321.464.9
Gemini 2.5 Flash + GUI-Actor10.325.673.1
LRM + GUI-ACTORClaude 3.7 Sonnet+ GUI-Actor7.518.563.9
Gemini 2.5 Pro + GUI-Actor9.324.073.2
o1 + GUI-Actor7.517.768.2
o3 + GUI-Actor12.127.474.0
o4-mini + GUI-Actor8.420.065.1
VLM + UI-TARSGPT-4o + UI-TARS6.520.867.3
Qwen 2.5 72B + UI-TARS7.521.863.2
Gemini 2.5 Flash + UI-TARS9.324.170.2
LRM + UI-TARSClaude 3.7 Sonnet + UI-TARS9.317.561.5
Gemini 2.5 Pro + UI-TARS7.523.471.6
o1 + UI-TARS6.518.566.0
o3 + UI-TARS12.127.272.4
o4-mini + UI-TARS7.519.462.5
", + "bbox": [ + 168, + 64, + 828, + 402 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/c0dcda412e8aea234339339191505fc7c36ae8cf5fbe152af1a3683d49902da2.jpg", + "table_caption": [ + "Table 3: Model Performance for pairing GTA-1, GUI-Actor and UI-TARS with all LRMs & VLMs, including task success rate, average progress, and step accuracy. All results are in %." + ], + "table_footnote": [], + "table_body": "
Input TranscriptTask SuccessProgressStep Accuracy
Ground Truth10.321.766.4
Whisper Large-V38.420.965.5
Whisper Large-V3 (Noise)6.520.663.4
", + "bbox": [ + 84, + 453, + 477, + 566 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 4: Performance of GPT-4o + UGround-V1 using (1) ground-truth transcripts, (2) transcripts generated from original user speech by Whisper Large-V3, and (3) transcripts generated from noisy speech by Whisper Large-V3.", + "bbox": [ + 81, + 575, + 480, + 636 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "process), so we only evaluate click-type actions with annotated bounding boxes are scored; instructions like \"scroll\" remain in the history but are not counted in our metrics. Of the 1,885 instructions, 1,412 are scored, yielding 1,714 evaluated action steps (one screenshot per step). Tasks average 17.6 evaluated steps.", + "bbox": [ + 81, + 660, + 480, + 747 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "User behaviors", + "text_level": 1, + "bbox": [ + 83, + 757, + 204, + 771 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Figure 8 shows diverse user behaviors in RealWebAssist not present in previous benchmarks. We include a zip file of the live recordings (including audio) from which the examples are taken.", + "bbox": [ + 81, + 776, + 478, + 830 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Information seeking As Figure 8A shows, the user is seeking information from different aspects, like images and ratings, before they make the purchase decision.", + "bbox": [ + 81, + 832, + 478, + 875 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Comparing different options Figure 8B shows the process", + "bbox": [ + 83, + 875, + 478, + 888 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "of the user viewing two candidates and finally make the decision between them.", + "bbox": [ + 514, + 455, + 911, + 483 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Changing minds In Figure 8C, the user is searching for some immersive dining experience. 
They are checking different restaurants and frequently change their minds when they see more options.", + "bbox": [ + 514, + 484, + 911, + 539 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Trial-and-error As Figure 8D shows, the user has several unsuccessful attempts when searching for men's fashion week. They refer to previous searches or initiate new ones to look for what they want.", + "bbox": [ + 514, + 539, + 911, + 595 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "These diverse behaviors increase the complexity of the web assistance: instead of clearly defined-goals, the user themselves are also actively collecting knowledge to make decisions, which requires web assistant to follow the user's mind and act accordingly.", + "bbox": [ + 514, + 595, + 911, + 666 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/85af84f589e6a25c2962527918386f0b16a85e37584505f52029ff88a2d1a816.jpg", + "image_caption": [ + "\"Close all the tabs\"" + ], + "image_footnote": [], + "bbox": [ + 86, + 89, + 897, + 248 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A Information seeking", + "text_level": 1, + "bbox": [ + 94, + 330, + 346, + 351 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/461528e1a0f26a59ab974cdb764d3878636aa1d9dbf8cf802bb648c7768dafcf.jpg", + "image_caption": [ + "Figure 7: Example of annotated bounding boxes for an instruction. The red boxes represent the correct bounding boxes. The user gave the instruction \"Close all the tabs\". For evaluation purposes, closing any of the tabs first is considered correct at each step, so all the x marks are labeled as correct at each step." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 354, + 916, + 455 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B Comparing different options", + "text_level": 1, + "bbox": [ + 93, + 465, + 441, + 486 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d437676042ad38ac634c0d36d327cc10e98ca3bb9a29f7c189a150c8f1375f3c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 93, + 488, + 916, + 599 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "C Changing minds", + "text_level": 1, + "bbox": [ + 93, + 614, + 300, + 636 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/2804f0236f67ed074ccb568daf1db5e3988534a2f55bf4a59dc12969c3573f75.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 641, + 916, + 734 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "D Trial-and-error", + "text_level": 1, + "bbox": [ + 94, + 744, + 289, + 763 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/8bfb0f9626c848519a4275d943677e17f7c84cbd0ee00c672e1e4d2e7106f0da.jpg", + "image_caption": [ + "Figure 8: Example of rich user behaviors in RealWebAssist." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 768, + 893, + 861 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Task # Description", + "text_level": 1, + "bbox": [ + 163, + 101, + 312, + 116 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + " 1 Buy a gift for each of my three friends with a budget of $100", + "2 Find and buy a birthday gift for a friend who loves tech, within a $50 budget.", + "3 Purchase a cute water bottle for everyday use, under $15", + "4 Compare different laptops and buy one with the best review", + "5 Purchase three home workout items under \\(75 and compare their reviews before buying.", + " 6 Find and order a customized gift (e.g., engraved or personalized) for a friend's graduation under $60.", + " 7 Order a complete warm and durable winter outfit (jacket, gloves, and boots) under $200.", + "8 Get two sets of reusable grocery bags under \\(20 total, checking for durability and eco-friendliness.", + "9 Buy two wall paintings for a family house, one for a 13-year old boy, one for a 6-year old girl", + "10 Purchase a set of colorful coffee mugs under $20 with fun designs", + "11 Buy a small easy-care indoor plant under \\(15 and schedule delivery within three days", + "12 Get a colorful umbrella for under \\(30, making sure it's big enough for two people", + "13 Buy a set of scented candles under $25, ensuring they have good reviews for long-lasting fragrance.", + "14 Find and purchase a durable phone case under $20 for an iPhone 14 Pro Max.", + "15 Order a cozy throw blanket under \\(30, checking for softness and warmth.", + "16 Buy a set of three face masks (reusable & breathable) under $15.", + "17 Get a wireless Bluetooth speaker under \\(40 with good bass and waterproofing.", + "18 Order a set of noise-canceling earplugs under $15, ensuring they're comfortable for sleep.", + "19 Find and buy a compact travel pillow and eye mask set under $30.", + "20 Purchase a set of six kitchen 
towels under \\(20 with high absorbency.", + "21 Buy an adjustable desk lamp under \\(35 with multiple brightness settings.", + "22 Order a pack of 12 gel pens under \\(15 in assorted colors with smooth writing.", + "23 Purchase a waterproof picnic blanket under \\(40, ensuring it's easy to fold and carry.", + "24 Buy a cute yet professional notebook under \\(20 for journaling or work.", + "25 Find and purchase a comfortable memory foam seat cushion under \\(35 for long sitting hours.", + "26 Order a set of reusable silicone food storage bags under $25.", + "27 Buy a pair of comfy indoor slippers under \\(30 with high reviews for warmth and durability.", + "28 Purchase a portable mini humidifier under \\(40 with USB charging.", + "29 Order a stylish travel makeup bag under \\(25, ensuring it has multiple compartments.", + "30 Find and order a surprise gift box for a friend who enjoys skincare, under $50.", + "31 Compare wireless earbuds and purchase the best-reviewed pair under $100.", + "32 Order a budget-friendly yet stylish smartwatch under $\\$ {75}$ ,ensuring good battery life.", + "33 Find and order a high-quality mechanical keyboard under $120, comparing typing feel and reviews", + "34 Find and buy a useful desk gadget under \\(40 for a friend who works from home", + "35 Plan flights for a trip from US to Europe (at least two different countries) for 3 days, comparing different airlines to find the best deal.", + "36 Plan a 5-day trip to Japan, booking both flights and hotels, taking into account customer reviews.", + "37 Book a hotel for a weekend trip for a good price near the beach within the country, making sure you can cancel the trip at any time", + "38 Plan a spontaneous weekend trip to a destination with cheap last-minute flights and good hotel deals, for hotel make sure it's comfortable enough.", + "39 Book a luxury hotel for a weekend at a city in the west US, pay attention to different services offered", + "40 Plan a three-stop European trip in a 
single week, with flights and hotel for each place", + "41 Book hotel for a family tour of four to a kid-friendly destination, with a hotel offering family amenities and breakfast included.", + "42 Arrange a road trip across the US, booking rental cars and a mix of motels and boutique hotels along the route.", + "43 Book a romantic beach getaway in Hawaii for two people, make sure it's close to beach and have sea view" + ], + "bbox": [ + 196, + 121, + 833, + 882 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Full List of Tasks", + "bbox": [ + 84, + 66, + 220, + 80 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Task # Description", + "text_level": 1, + "bbox": [ + 163, + 69, + 313, + 85 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "44 Plan a family Disney Cruise, securing flights to Port Canaveral and a hotel near the theme parks before sailing.", + "45 Arrange a wine country getaway, booking flights to Napa Valley, a rental car, and a vineyard hotel with wine-tasting experiences.", + "46 Find flights and a convertible rental car for a coastal drive in Hawaii, staying in beachfront resorts along the way.", + "47 Choose flights to a popular ski destination and secure a lodge or hotel under \\(150/night.", + "48 Book last-minute flights and a centrally located hotel in a major US city, focusing on deals under $100/night with great city landscape view.", + "49 Secure round-trip flights to a scenic South American city and book a comfortable hotel near local attractions.", + "50 Pick flights from a major US airport to a warm city in Canada, with a hotel under $100/night in the downtown area.", + "51 Schedule flights and a boutique hotel stay in a city rich in history, aiming for under $100/night in a central location.", + "52 Arrange direct flights to a popular theme park region, booking a nearby hotel or hotel with easy transportation", + "53 Schedule flights for a quick visit to a popular 
national park, booking a nearby lodge or hotel with scenic views.", + "54 Book round-trip flights to a major Middle Eastern city and reserve a modern hotel near historic sites for under $100/night", + "55 Secure flights from the US to a tropical island, choosing a resort that offers water sports", + "56 Find flights and a resort for a tropical vacation in Cancun, Mexico, focusing on all-inclusive options for relaxation", + "57 Book flights to Cairo for a 5-day trip, then pick a hotel with a direct view of the Pyramids and free breakfast included", + "58 Book a solo retreat to Kyoto, Japan, selecting a traditional ryokan stay with an onsen and authentic Japanese breakfast.", + "59 Buy tickets for 2 people to an NBA Basketball game next weekend.", + "60 Find and book tickets for a concert by a top artist in the nearest major city within the next three months.", + "61 Search for a last-minute concert ticket and find the best available seat.", + "62 Book 3 tickets for a rivalry match between two major sports teams", + "63 Book 3 tickets for a unique or unusual event, such as a drag show, wrestling match, or haunted experience", + "64 Purchase four tickets for a Broadway musical happening next month, aiming for orchestra seats if possible.", + "65 Buy tickets for a family of 4 with 2 kids to a MLB game", + "66 Find and book tickets to a popular stand-up comedy show in a western big city for the upcoming weekend, prioritizing seats near the front.", + "67 Locate discounted tickets for a live theater performance in California this weekend", + "Search for an NFL game next month and buy two tickets in a mid-priced seating section for some eastern teams", + "69 Identify and reserve tickets for a children's matinee performance at a local venue, comparing any available family packages or group discounts.", + "70 Secure seats for a must-see hockey match, comparing \"Best Seat\" options.", + "71 Find tickets for a classical music or orchestra concert in the nearest major city 
next month, aiming for seats with a good view of the stage.", + "72 Buy tickets for two people to an English Premier League soccer match in London city center next weekend.", + "73 Find and purchase tickets to a major electronic music festival in Las Vegas within the next two months.", + "74 Book seats for a stand-up comedy show in downtown Chicago next month, make sure the location is in city center.", + "75 Search for tickets to a top-tier cricket match in Sydney next month, aiming for seats that offer a good view of the pitch", + "76 Locate a family-friendly musical performance near your city for next month." + ], + "bbox": [ + 191, + 90, + 834, + 880 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Task # Description", + "text_level": 1, + "bbox": [ + 163, + 68, + 312, + 85 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "77 Purchase two tickets to an upcoming rugby match in Dublin next month, making sure seats are in a central section and remain under.", + "78 Find a highly rated ballet or opera production in Paris within the next two months, choose the seat in the second floor if available", + "79 Find tickets to a major fashion event, such as a runway show or fashion week experience.", + "80 Look for tickets to a themed immersive dining experience (e.g., murder mystery dinner, fantasy-inspired restaurant)", + "81 Book tickets for UEFA soccer game between two Spanish teams for the next week", + "82 Book a ticket for a rooftop movie screening or outdoor film festival in a major city.", + "83 Find tickets for an esports event and compare standard vs. premium seating options.", + "84 Book a ticket for a \"silent disco\" event in a city of your choice.", + "85 secure two tickets to a major MLB game in a well-known ballpark anywhere in the U.S. next month, opting for seats along the first baseline.", + "86 Find and book tickets for a large-scale country music festival occurring in the southern U.S. 
within the next two months, focusing on general admission passes.", + "87 Purchase seats for a top-tier college football rivalry game taking place within the next six weeks, ensuring you can view the marching band's performance easily.", + "88 Reserve tickets to a major NHL match in the next two months, choosing seats close to the ice.", + "89 Book passes for a nationally touring art exhibition or immersive art experience within the next two months, ensuring weekend availability.", + "90 Secure seats for a top-rated Broadway musical in New York City, making sure the date aligns with a Saturday evening performance.", + "91 Reserve a spot for a special museum or cultural center night event (e.g., \"Night at the Museum\" or themed after-hours) in a major U.S. city within the next two months.", + "92 Find the best deal on a new smartphone (latest model iPhone or Samsung)", + "93 Find the best dinner deal for two using food delivery apps", + "94 Purchase an outfit for a formal event within a $150 budget", + " 95 Buy a high-quality gaming chair for under $250", + "96 Find and book the best available concert tickets for a top artist in your city", + "97 Book tickets for a live theater performance and find a pre-show dinner reservation", + "98 Plan a sports game outing for two within a $150 budget", + "99 Plan a weekend getaway for two within a $500 budget", + "100 Organize a one-day itinerary for a solo traveler in a major city", + "101 Compare car rental options for a 5-day road trip", + "102 Find and book a local escape room challenge for a group of four", + "103 Plan a movie night with discounted tickets and snacks", + "104 Find a highly-rated sushi restaurant and order a meal for delivery", + "105 Plan a surprise birthday dinner at a fine dining restaurant", + "106 Order a late-night snack under $15 for delivery", + "107 Book a luxury hotel staycation for a weekend" + ], + "bbox": [ + 184, + 90, + 834, + 657 + ], + "page_idx": 14 + }, + { + "type": "table", + 
"img_path": "images/f1bb03f90e27f5b8f5b31973278bf8c3eb46cfe6cc37a5093cd408495df0d85d.jpg", + "table_caption": [ + "Full List of Websites" + ], + "table_footnote": [], + "table_body": "
NameURLTask Type
ACL Festivalaclfestival.comEntertainment
Amazonamazon.comShopping
Ammooraammoora.comEntertainment
Appleapple.comShopping
Artechouseartechouse.comEntertainment
Atom Ticketsatomtickets.comEntertainment
Best Buybestbuy.comShopping
Adidas Arenabilletterie.adidasarena.comEntertainment
Broadwaybroadway.comEntertainment
Charm City Clue Roomcharmcityclueroom.comEntertainment
City Passcitypass.comTravel Planning
CN Towercntower.caTravel Planning
Colorado Tourismcolorado.comTravel Planning
Corsaircorsair.comShopping
Coupon Followcouponfollow.comShopping
Crave 4Dcrave4d.comEntertainment
Dine Immersivedineimmersive.comFood
Disney Cruisedisneycruise.disney.go.comTravel Planning
DoorDashdoordash.comFood
Drone and DSLRdroneandslr.comShopping
Enterpriseenterprise.comTravel Planning
ESChartsescharts.comEntertainment
ETIXetix.comEntertainment
Eventbriteeventbrite.comEntertainment
Expediaexpedia.comTravel Planning
Fashion Week Onlinefashionweekonline.comEntertainment
Fever Upfeverup.comEntertainment
Googlegoogle.comTravel Planning
Google Mapsgoogle.com/mapsTravel Planning
Live Nationlivenation.comEntertainment
Library of Congressloc.govTravel Planning
LoL Esportslolesports.comEntertainment
MLBmlb.comEntertainment
MLB Ticketsmlbtickets.comEntertainment
NYICFFnyicff.orgEntertainment
OpenTableopentable.comFood
Postmatespostmates.comFood
Rakutenrakuten.comShopping
Redditredgit.comEntertainment
Retail Me Notretailmenot.comShopping
Road Trip USAroadtripusa.comTravel Planning
Samsungsamsung.comShopping
San Lorenzo DCsanlorenzodc.comFood
Screen Dailyscreendaily.comEntertainment
Secret Baltimoresecretbaltimore.comTravel Planning
Secret Labsecretlab.coShopping
Smithsonian Sleepoverssmithsoniansleepovers.orgEntertainment
StubHubstubhub.comEntertainment
The Bureau Fashion Weekthebureaufashionweek.comEntertainment
The Meltdownthemeltdown.comEntertainment
The UFLtheufl.comEntertainment
Ticketmasterticketmaster.comEntertainment
Ticketmaster Franceticketmaster.frEntertainment
Ticket Webticketweb.comEntertainment
TickPicktickpick.comEntertainment
TripAdvisortripadvisor.comTravel Planning
Two Step Inntwostepinn.comEntertainment
Two Step Inn Frontgatetwostepinn.frontgatetickets.comEntertainment
Uberuber.comTravel Planning
Uber Eatsubereats.comFood
Viatorviator.comTravel Planning
Vivid Seatsvividseats.comEntertainment
Washington Tourismwashington.orgTravel Planning
Yelpyelp.comFood
Zarazara.comShopping
", + "bbox": [ + 250, + 710, + 748, + 878 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 250, + 65, + 748, + 776 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Word Frequency", + "text_level": 1, + "bbox": [ + 84, + 68, + 217, + 84 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure 9 compares the most frequent instruction words in RealWebAssist with those from two common benchmarks, WebLINX and WebArena. The vocabulary used in RealWebAssist is more informal, as the dataset comes from natural spoken instructions. The tone is also more informal and conversational compared to WebLINX and WebArena.", + "bbox": [ + 81, + 85, + 480, + 170 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cf0f06fa68bc84e998eb72b04f0557365b08dfdb0d2f7b19b5f24754427b7de7.jpg", + "image_caption": [ + "Figure 9: Word Cloud of the most frequent words in RealWebAssist v.s. common benchmarks WebLINX and WebArena." + ], + "image_footnote": [], + "bbox": [ + 124, + 191, + 460, + 358 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Instructions for the participants", + "text_level": 1, + "bbox": [ + 145, + 445, + 418, + 464 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Thank you for participating in our study! You'll be guiding another person who is controlling the computer on your behalf. Imagine you are helping a friend navigate a website remotely, giving step-by-step instructions to complete a task. Feel free to interpret the task as you see fit. Here are some guidelines to keep in mind:", + "bbox": [ + 107, + 476, + 454, + 575 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Give instructions as naturally as possible, just like you would in real life.", + "- You don't have to be overly precise—say what feels natural.", + "- You can only give one instruction at a time. 
After the operator follows your instruction, wait for them to complete it before giving the next step.", + "- Keep your instructions clear and concise, but don't stress too much about exact wording—just say what comes to mind!", + "- You are allowed to instruct the operator to use Google to search for things." + ], + "bbox": [ + 116, + 578, + 454, + 758 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Video Example", + "text_level": 1, + "bbox": [ + 215, + 781, + 346, + 799 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A sample raw recording can be viewed via the link below (audio included)", + "bbox": [ + 107, + 805, + 452, + 834 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "https://youtu.be/CcyIt9tr5qo", + "bbox": [ + 107, + 845, + 299, + 862 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_model.json b/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7c8bc14b08210d01f00f8cdea4bda77828d2e0a3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_model.json @@ -0,0 +1,4293 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.286, + 0.058, + 0.71 + ], + "angle": 270, + "content": "arXiv:2504.10445v2 [cs.AI] 1 Dec 2025" + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.12, + 0.911, + 0.162 + ], + "angle": 0, + "content": "RealWebAssist: A Benchmark for Long-Horizon Web Assistance with Real-World Users" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.174, + 0.877, + 0.194 + ], + "angle": 0, + "content": "Suyu Ye\\*, Haojun Shi\\*, Darren Shih 1, Hyokun Yun 2, Tanya G. 
Roosta 2, Tianmin Shu" + }, + { + "type": "text", + "bbox": [ + 0.408, + 0.196, + 0.59, + 0.212 + ], + "angle": 0, + "content": "1Johns Hopkins University," + }, + { + "type": "text", + "bbox": [ + 0.45, + 0.211, + 0.548, + 0.224 + ], + "angle": 0, + "content": "2Amazon.com" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.224, + 0.772, + 0.24 + ], + "angle": 0, + "content": "{sye10, hshi33, dshih5, tianmin.shu}@jhu.edu, {yunhyoku,troosta} \\(@\\) amazon.com" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.274, + 0.314, + 0.287 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.295, + 0.465, + 0.599 + ], + "angle": 0, + "content": "To achieve successful assistance with long-horizon web-based tasks, AI agents must be able to sequentially follow real-world user instructions over a long period. Unlike existing web-based agent benchmarks, sequential instruction following in the real world poses significant challenges beyond performing a single, clearly defined task. For instance, real-world human instructions can be ambiguous, require different levels of AI assistance, and may evolve over time, reflecting changes in the user's mental state. To address this gap, we introduce RealWebAssist, a novel benchmark designed to evaluate sequential instruction-following in realistic scenarios involving long-horizon interactions with the web, visual GUI grounding, and understanding ambiguous real-world user instructions. RealWebAssist includes a dataset of sequential instructions collected from real-world human users. Each user instructs a web-based assistant to perform a series of tasks on multiple websites. A successful agent must reason about the true intent behind each instruction, keep track of the mental state of the user, understand user-specific routines, and ground the intended tasks to actions on the correct GUI elements. 
Our experimental results show that state-of-the-art models struggle to understand and ground user instructions, posing critical challenges in following real-world user instructions for long-horizon web assistance." + }, + { + "type": "title", + "bbox": [ + 0.227, + 0.617, + 0.337, + 0.632 + ], + "angle": 0, + "content": "Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.635, + 0.48, + 0.815 + ], + "angle": 0, + "content": "As an integral part of people's daily life, many of our everyday tasks are performed on the internet. With the tremendous advances in open-ended agents driven by large reasoning models (LRMs) and vision-language models (VLMs), there has been increasing interest in engineering web-based agents that can assist humans with complex tasks on the web following humans' instructions (Zheng et al. 2024a; Nakano et al. 2022). Recent works have demonstrated the promising performance of web-based agents on planning (Putta et al. 2024; Wang et al. 2024; Yao et al. 2023) and Graphical User Interface (GUI) grounding (Cheng et al. 2024; Wu et al. 2024b; Gou et al. 2024; Yang et al. 2024; Xu et al. 2024), across diverse websites, tasks, and GUI interfaces." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.816, + 0.48, + 0.844 + ], + "angle": 0, + "content": "Despite these encouraging results, there have not been systematic studies on long-horizon web assistance with real-" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.274, + 0.914, + 0.385 + ], + "angle": 0, + "content": "world users. Existing benchmarks (e.g., (Zhou et al. 2023; Deng et al. 2024; Cheng et al. 2024; Yao et al. 2022; Jang et al. 2024)) typically focus on performing a task based on a single instruction. Additionally, the instructions in the current benchmarks were not collected from real users during natural web use sessions, lacking the realism of real user instructions. 
As a result, these benchmarks do not capture the full complexity of real users' web behavior and instructions." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.386, + 0.913, + 0.581 + ], + "angle": 0, + "content": "To bridge this gap, we propose RealWebAssist, the first sequential instruction following benchmark that evaluates long-horizon web assistance with real-world users. As illustrated in Figure 1, to perform a task, a user will instruct an AI assistant in a long sequence. Based on the past instructions and screenshots, the AI assistant must execute one or a few steps of actions to perform the latest instruction. Additionally, a user can engage in repeated interactions over a series of tasks with the assistant in a long session up to 40 minutes. To construct RealWebAssist, we recruited real users to instruct an assistant to perform multiple real-world tasks on the web. We created a large dataset with real user instructions (in both speech and text) for diverse real-world tasks and websites (as shown in Figure 2)." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.582, + 0.914, + 0.859 + ], + "angle": 0, + "content": "The sequential instruction following tasks in our RealWebAssist benchmark reflect the natural human behavior on the web. First, real-world users may not initially know what they are looking for. Thus, they need to engage in information seeking on multiple web pages (e.g., step 1-2 in Figure 1), sometimes even across websites. Second, based on new information such as product reviews, users may change their minds (e.g., step 3). Third, users give simple instructions that are seemingly ambiguous out of the context but could be interpreted based on spatial and temporal context via pragmatic reasoning (Goodman and Frank 2016; Fried et al. 2023). 
For instance, the third instruction in Figure 1 does not explicitly describe which product, but an intelligent assistant should be able to infer the true user intent and correctly select the product in the user's mind. Lastly, in our benchmark, users can browse the websites and have the autonomy to make critical decisions (such as purchasing) on their own, which is complementary to existing benchmarks that focus on agents' planning ability to fully complete the tasks without human involvement." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.861, + 0.913, + 0.89 + ], + "angle": 0, + "content": "We systematically evaluate state-of-the-art models, including GUI grounding, VLMs, and large reasoning mod" + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.851, + 0.48, + 0.89 + ], + "angle": 0, + "content": "*These authors contributed equally. Copyright © 2026, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.107, + 0.066, + 0.861, + 0.207 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.217, + 0.916, + 0.261 + ], + "angle": 0, + "content": "Figure 1: An example sequential instruction following task with a real-world user. The red circles indicate the correct actions based on the user's spoken instructions. Sequential instructions introduce unique challenges, such as the need to retain and reason over past context. For instance, the instruction in step 3 requires information from step 1 to be correctly interpreted." + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.273, + 0.852, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.426, + 0.916, + 0.468 + ], + "angle": 0, + "content": "Figure 2: Examples of general task categories (left) and websites visited (right) in RealWebAssist. 
The tasks span a wide range of real-world scenarios, from shopping to food & entertainment to travel planning, which encourages users to visit many different websites." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.495, + 0.481, + 0.553 + ], + "angle": 0, + "content": "els. Experimental results reveal that these models lack several key abilities, including grounding, understanding user intents, reasoning about spatial and temporal context, and adapting to user-specific routines." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.565, + 0.347, + 0.58 + ], + "angle": 0, + "content": "Related Works" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.584, + 0.48, + 0.89 + ], + "angle": 0, + "content": "Web Agent Benchmarks. Existing web agent benchmarks primarily evaluate the performance of web agents on tasks with clearly defined, unambiguous instructions, often overlooking the complexities of real-world users' behavior and their instructions to an AI assistant. On WebArena (Zhou et al. 2023), Mind2Web (Deng et al. 2024), and WebShop (Yao et al. 2022), an agent follows a single instruction to perform an isolated task. While they offer an evaluation of an agent's planning capacity, they lack the evaluation of an agent's ability to follow a long sequence of user instructions on long-horizon web tasks. There have also been GUI grounding benchmarks, such as ScreenSpot (Cheng et al. 2024), that focused on grounding simple instructions to clicking actions on webpages. These instructions only instruct web agents to click web elements rather than reaching a user goal (e.g., purchasing an item). WebLINX (Lü, Kasner, and Reddy 2024) features sequential instruction following. However, the instructions were generated by annotators who received detailed guidelines and extensive training, rather than by actual users. 
The resulting instructions do not capture the nuances and complexity of real-world user instructions that naturally emerge in interactions with an as-" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.495, + 0.913, + 0.566 + ], + "angle": 0, + "content": "sistent. In contrast, RealWebAssist consists of sequential instruction following tasks for assisting real-world users, providing a novel set of challenges necessary for long-horizon web assistance for real-world users. Table 1 summarizes key differences between RealWebAssist and prior benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.568, + 0.915, + 0.763 + ], + "angle": 0, + "content": "Autonomous Web Agents. There have been many recent works on engineering autonomous web agents through retrieval augmented planning (Kim et al. 2024; Zhou et al. 2024; Wu et al. 2024a; He et al. 2024; Pan et al. 2024), finetuning (Hong et al. 2024; Gur et al. 2024; Deng et al. 2024; Pang et al. 2024; Zhang and Zhang 2024), learning workflows (Zhang et al. 2023; Wang et al. 2024; Zheng et al. 2024b; Majumder et al. 2023; Cai et al. 2024), reinforcement learning (Liu et al. 2018; Shi et al. 2017; Nogueira and Cho 2016; Humphreys et al. 2022), and combinations of these methods (Liu et al. 2023; Putta et al. 2024). These works focus on planning for a single task. However, there has not been much work on understanding and following real-world users' sequential instructions on long-horizon tasks." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.764, + 0.915, + 0.89 + ], + "angle": 0, + "content": "GUI Grounding. One key ability for web agents in many assistance tasks is to ground instructions to clicking actions on a webpage. Recent works have explored VLM finetuning (e.g., (Gou et al. 2024; Wu et al. 2024b; Yang et al. 2024, 2025; Wu et al. 2025; Qin et al. 2025; Xu et al. 2025; Yuan et al. 2025)) as well as prompting pretrained VLMs with segmentations of web elements (e.g., (Yang et al. 
2023)) for enabling GUI grounding. These methods generate coordinates or bounding boxes on webpages to indicate where to click." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.066, + 0.825, + 0.214 + ], + "angle": 0, + "content": "
BenchmarkReal UserSequential InstructionsReal WebsitesGUI GroundingSpeech# Instructions
SreenSpot (Cheng et al. 2024)XXX1200+
WebArena (Zhou et al. 2023)XXXXX812
Mind2Web (Deng et al. 2024)XXXX2000+
WebLINX (Lù, Kasner, and Reddy 2024)XXX512
VideoWebArena (Jang et al. 2024)XXXX2021
WebShop (Yao et al. 2022)XXXXX12087
BearCubs (Song et al. 2025)XXXX111
RealWebAssist (Ours)1885
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.231, + 0.913, + 0.288 + ], + "angle": 0, + "content": "Table 1: Comparison between RealWebAssist and existing web agent benchmarks on several key aspects: (1) whether instructions were given by real-world users instead of annotators, (2) whether there is a sequence of instructions, (3) whether there are real-world websites, (4) whether the agent needs to execute actions by selecting coordinates on webpages, (5) whether the instructions are speech instructions, and (6) the number of total instructions." + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.317, + 0.255, + 0.332 + ], + "angle": 0, + "content": "\"Ok, buy this item\"" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.334, + 0.266, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.277, + 0.318, + 0.457, + 0.333 + ], + "angle": 0, + "content": "\"Let's do All Airports\"" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.334, + 0.454, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.428, + 0.48, + 0.471 + ], + "angle": 0, + "content": "Figure 3: Multiple actions can satisfy a user's intent. A web agent's action is considered correct if the coordinate they provide is within one of the annotated correct regions." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.498, + 0.48, + 0.556 + ], + "angle": 0, + "content": "They have only been trained on low-level instructions that clearly refer to web elements. It remains unclear if they can understand real-world user instructions that must be interpreted considering context or may refer to high-level goals." 
+ }, + { + "type": "title", + "bbox": [ + 0.166, + 0.57, + 0.396, + 0.585 + ], + "angle": 0, + "content": "RealWebAssist Benchmark" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.591, + 0.203, + 0.607 + ], + "angle": 0, + "content": "Problem Setup" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.611, + 0.478, + 0.736 + ], + "angle": 0, + "content": "RealWebAssist evaluates agents' ability to follow long-horizon, sequential web instructions to assist users with their high-level goals. In each task, a human user will try to reach an open-ended goal such as \"buy formal outfits for a formal event\" by instructing the assistant through a series of spoken instructions. The dataset is collected from interactions between human users and human assistants in a human experiment. To evaluate agents, we use the human assistants' actions to evaluate the agents' success." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.737, + 0.48, + 0.89 + ], + "angle": 0, + "content": "In RealWebAssist, a web agent has access to the current instruction, webpage (as a screenshot), and all the past interactions (previous instructions & screenshots of webpages). Since we are focusing on tasks on real-world websites, it is challenging to ensure safety and reproducibility in an interactive evaluation setting. Therefore, we adopt an offline evaluation setting following prior web-based agent benchmarks with real websites (Deng et al. 2024; Cheng et al. 2024). Specifically, for each instruction collected from the human experiment, the agent needs to identify the correct element to interact with by providing a coordinate or a bound" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.315, + 0.913, + 0.399 + ], + "angle": 0, + "content": "ing box to click on the webpage. As shown by figure 3, a web agent's action is considered correct if the coordinate or the center of the bounding box they provide falls in the annotated correct regions on the webpage. 
If there are multiple steps corresponding to one instruction, we evaluate if the web agent's actions for the same instruction are all correct." + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.409, + 0.669, + 0.423 + ], + "angle": 0, + "content": "Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.426, + 0.821, + 0.441 + ], + "angle": 0, + "content": "We consider the following evaluation metrics:" + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.444, + 0.913, + 0.471 + ], + "angle": 0, + "content": "- Task success rate: A task is successful if the web agent can correctly produce actions for all instructions in a task." + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.473, + 0.913, + 0.514 + ], + "angle": 0, + "content": "- Average progress: We measure the progress of a task by the percentage of consecutive instructions the web agent can successfully perform before its first error in the task." + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.514, + 0.913, + 0.582 + ], + "angle": 0, + "content": "- Step success rate: We also consider a teacher forcing setting as a simpler, diagnostic evaluation, where the web agent will only need to follow the instruction at a single step of a task assuming all previous instructions have been successfully performed." + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.444, + 0.913, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.594, + 0.685, + 0.608 + ], + "angle": 0, + "content": "Dataset Construction" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.612, + 0.913, + 0.79 + ], + "angle": 0, + "content": "Setup. We recruited 10 participants (4 female, 6 male, mean age = 20 years) from a US university campus, none of whom had prior knowledge of the study's purpose, to construct the dataset. All participants were native or fluent English speakers. 
Each participant completed a 40-minute real-world web assistance session in which they tackled a series of open-ended tasks designed to encourage diverse strategies. During each session, participants verbally instructed an experimenter, who operated the computer on their behalf, to complete the tasks. We captured screen recordings and used a high-quality USB microphone to record speech as raw data. The user study was approved by an institutional review board." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.792, + 0.913, + 0.89 + ], + "angle": 0, + "content": "User Tasks. To increase the instruction diversity and realism, participants received general web-based tasks requiring active information seeking, sub-goal planning, and comparison among various options. We generated the task list by few-shot prompting GPT-4o with open-ended tasks, followed by manual filtering and editing to ensure task quality and feasibility. These tasks provide only general guidance," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.084, + 0.069, + 0.48, + 0.125 + ], + "angle": 0, + "content": "ensuring flexibility for personal decision-making. Example tasks include \"Purchase an outfit for a formal event\" and \"Plan a 5-day trip to Japan, booking both flights and hotels\". Each user finishes about 10 tasks." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.125, + 0.48, + 0.194 + ], + "angle": 0, + "content": "Emergent User Behavior. In our realistic, open-ended settings, users exhibit rich behaviors that are not present in previous benchmarks. These include, but are not limited to, information seeking, researching and comparing different options, change of mind, and trial-and-error." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.194, + 0.48, + 0.485 + ], + "angle": 0, + "content": "Annotations. We manually labeled RealWebAssist data to ensure high-quality annotations. We first segmented the full recording into individual clips corresponding to each user's instructions. 
In our benchmark, we disregard user speech unrelated to explicit instructions for the assistant, such as filler words or verbalized thought processes. For each instruction, we provide raw speech, speech transcript, webpage, and the correct regions to click (in the form of one or more bounding boxes). When there were multiple correct answers for the instructions (for instance, \"can you close all the current tabs\"), we annotated all correct regions with multiple bounding boxes. When the experimenter made a mistake during the data collection sessions, we annotated the correct action intended by the user. If an instruction required multiple steps to complete, we set the instruction at each step as the same instruction. To generate the text instructions, we used an off-the-shelf recognition model, Whisper Large-V3 (Radford et al. 2023), to transcribe users' speech and then manually fixed transcription errors. For all the instructions, we have three annotators verifying all of them, ensuring \\(100\\%\\) agreement." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.485, + 0.48, + 0.542 + ], + "angle": 0, + "content": "Dataset Statistics. RealWebAssist contains 1,885 user instructions across 107 tasks, 66 websites, and 2,524 screenshots. In addition to the benchmark, we also plan to release the raw data, consisting of over 6 hours of video & audio." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.551, + 0.208, + 0.568 + ], + "angle": 0, + "content": "Key Challenges" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.57, + 0.48, + 0.778 + ], + "angle": 0, + "content": "RealWebAssist features multiple challenges as illustrated in Figure 4, including spatial and temporal reasoning needed to understand ambiguous and context-dependent user instructions, planning for multiple steps of actions to reach the goal communicated by an instruction, and learning about user-specific routines. 
These key challenges provide a more realistic and holistic evaluation of a web agent's reasoning, planning, and learning abilities to assist real-world users on long-horizon tasks. It is worth noting that many of these challenges, in particular, spatial reasoning, temporal reasoning, and routine understanding, are not present in existing web agent benchmarks. Unlike RealWebAssist, prior benchmarks, such as ScreenSpot (Cheng et al. 2024), WebArena (Zhou et al. 2023), and Mind2Web (Deng et al. 2024), only include clear, unambiguous, and non-sequential instructions." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.779, + 0.48, + 0.89 + ], + "angle": 0, + "content": "Spatial Reasoning. When referring to one of the elements on a webpage, real-world users tend to use a concise instruction that can be understood conditioned on spatial context instead of an overly elaborated instruction. For instance, when instructing an assistant to buy a product, users may give short instructions such as \"select the cheapest one,\" instead of describing the desired product in detail. Figure 4A depicts different types of spatial reasoning that rely on di" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.07, + 0.913, + 0.152 + ], + "angle": 0, + "content": "verse spatial contexts, including ranking, spatial relations, and overall website functionalities. It is worth noting that these instructions may sometimes reveal users' preferences (e.g., preferred seating), providing additional information for the web agent to provide potentially more customized assistance in the future." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.171, + 0.913, + 0.422 + ], + "angle": 0, + "content": "Temporal Reasoning. In our sequential instruction following tasks, users may instruct an assistant with the history as an assumed temporal context. For example, to understand the intended meaning of \"click the last item,\" the assistant must memorize the items the user has viewed in the past. 
Figure 4B shows temporal reasoning based on different kinds of temporal context, ranging from short context between two consecutive webpages to long context with the same website to long context across websites. From the temporal context, the assistant needs to memorize crucial elements in the previous webpages, infer and track a user's mind (e.g., change of mind about what to buy) based on the past instructions and webpages, and identify the earlier webpage the user refers to. Such temporal reasoning has not been evaluated in prior web agent benchmarks. However, it is very common in our benchmark due to the nature of human web browsing behavior as well as human instructions guided by pragmatics (Goodman and Frank 2016)." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.44, + 0.914, + 0.593 + ], + "angle": 0, + "content": "Multi-step Planning. Many instructions require multiple steps to complete. In these cases, the assistant needs to interpret the goal implied by the instruction and plan a sequence of actions to achieve that goal. This goes beyond grounding the instruction to a single action on the current webpage. Figure 4C shows an example where the agent was asked to repeat the same order on another food delivery website to check if the price would be different. A successful execution of this instruction would require the agent to first understand what the order is to ground the goal on the current website and generate a successful multi-step plan." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.612, + 0.914, + 0.89 + ], + "angle": 0, + "content": "Routine. Since our benchmark allows a user to engage in repeated interactions with an assistant over multiple tasks, we observe that users may define routines understood by the assistant after repeated interactions. As shown in Figure 4D, the user initially gave detailed step-by-step instructions when selecting arrival and departure dates for a flight. 
In a subsequent task, however, the user simplified them into a single instruction when selecting dates for a hotel room. Such shorter instructions become possible after establishing a routine in the earlier task. Cognitive studies found that procedural abstraction, like these routines, naturally emerges in human cooperative communication through repeated interactions, allowing more efficient communication with partners (McCarthy et al. 2021). The emergence of such routines in our benchmark poses a novel challenge for web agents—learning user-specific procedural abstraction via repeated interactions to achieve human-like adaptive assistance. We hypothesize that this ability could enhance users' perception of the AI assistant, as it understands human cooperative communication." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.101, + 0.074, + 0.322, + 0.096 + ], + "angle": 0, + "content": "A Spatial Reasoning" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.098, + 0.276, + 0.113 + ], + "angle": 0, + "content": "Ranking" + }, + { + "type": "image_caption", + "bbox": [ + 0.13, + 0.118, + 0.211, + 0.136 + ], + "angle": 0, + "content": "\"Can you click on the seventh tab?\"" + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.14, + 0.235, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.279, + 0.118, + 0.378, + 0.137 + ], + "angle": 0, + "content": "\"And let's just get the lowest price tickets\"" + }, + { + "type": "image", + "bbox": [ + 0.26, + 0.139, + 0.403, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.528, + 0.1, + 0.65, + 0.114 + ], + "angle": 0, + "content": "Spatial relations" + }, + { + "type": "image_caption", + "bbox": [ + 0.441, + 0.117, + 0.554, + 0.136 + ], + "angle": 0, + "content": "\"Can you click the arrow between the two\"" + }, + { + "type": "image", + "bbox": [ + 0.432, + 0.139, + 0.565, + 0.198 + ], + "angle": 0, + "content": null + }, 
+ { + "type": "image_caption", + "bbox": [ + 0.612, + 0.117, + 0.704, + 0.136 + ], + "angle": 0, + "content": "Only select the two seats on the top" + }, + { + "type": "image", + "bbox": [ + 0.591, + 0.139, + 0.723, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.75, + 0.099, + 0.883, + 0.112 + ], + "angle": 0, + "content": "Website functions" + }, + { + "type": "image_caption", + "bbox": [ + 0.769, + 0.118, + 0.868, + 0.136 + ], + "angle": 0, + "content": "\"Change the end date from 20 to 22nd\"" + }, + { + "type": "image", + "bbox": [ + 0.756, + 0.138, + 0.881, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.1, + 0.203, + 0.348, + 0.225 + ], + "angle": 0, + "content": "B Temporal Reasoning" + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.227, + 0.314, + 0.242 + ], + "angle": 0, + "content": "Previous webpage" + }, + { + "type": "image_caption", + "bbox": [ + 0.121, + 0.246, + 0.232, + 0.256 + ], + "angle": 0, + "content": "\"Goto the previous tab\"" + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.258, + 0.235, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.279, + 0.246, + 0.386, + 0.256 + ], + "angle": 0, + "content": "\"No, stay on that page\"" + }, + { + "type": "image", + "bbox": [ + 0.261, + 0.258, + 0.394, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.227, + 0.782, + 0.242 + ], + "angle": 0, + "content": "Long context within the same website" + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.246, + 0.538, + 0.255 + ], + "angle": 0, + "content": "\"Click on HP laptop\"" + }, + { + "type": "image", + "bbox": [ + 0.426, + 0.257, + 0.557, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.606, + 0.246, + 0.714, + 0.256 + ], + "angle": 0, + "content": "\"Can you check ASUS?\"" + }, + { + "type": 
"image", + "bbox": [ + 0.597, + 0.257, + 0.723, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.757, + 0.246, + 0.892, + 0.256 + ], + "angle": 0, + "content": "\"Go back to the other laptop\"" + }, + { + "type": "image", + "bbox": [ + 0.77, + 0.257, + 0.887, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.355, + 0.323, + 0.632, + 0.336 + ], + "angle": 0, + "content": "Long context across multiple websites" + }, + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.34, + 0.214, + 0.358 + ], + "angle": 0, + "content": "\"Can you look at the next tab as well?\"" + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.362, + 0.234, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.158, + 0.423, + 0.18, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.34, + 0.402, + 0.359 + ], + "angle": 0, + "content": "\"Oh, this is like 95 bucks. Can you press the other tab\"" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.362, + 0.433, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.299, + 0.424, + 0.365, + 0.438 + ], + "angle": 0, + "content": "CityPASS" + }, + { + "type": "image_caption", + "bbox": [ + 0.435, + 0.34, + 0.548, + 0.359 + ], + "angle": 0, + "content": "\"OK, can you open a new tab and search for ...\"" + }, + { + "type": "image", + "bbox": [ + 0.431, + 0.362, + 0.559, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.486, + 0.424, + 0.508, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.591, + 0.34, + 0.729, + 0.358 + ], + "angle": 0, + "content": "\"This is 36. 
Can you go back to CN Tower's official website\"" + }, + { + "type": "image", + "bbox": [ + 0.559, + 0.362, + 0.725, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.424, + 0.676, + 0.443 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.777, + 0.34, + 0.872, + 0.359 + ], + "angle": 0, + "content": "\"I'd probably get the city pass option\"" + }, + { + "type": "image", + "bbox": [ + 0.765, + 0.362, + 0.887, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.818, + 0.424, + 0.841, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.101, + 0.449, + 0.344, + 0.469 + ], + "angle": 0, + "content": "C Multi-step planning" + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.47, + 0.391, + 0.489 + ], + "angle": 0, + "content": "\"Can you go to DoorDash and order the same thing to compare the price?\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.493, + 0.215, + 0.54 + ], + "angle": 0, + "content": "History (not shown here): The user previously ordered Snooze melt from Meltdown and selected French Fries" + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.49, + 0.347, + 0.542 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.494, + 0.49, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.49, + 0.63, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.49, + 0.755, + 0.542 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.789, + 0.49, + 0.885, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.102, + 0.552, + 0.216, + 0.569 + ], + "angle": 0, + "content": "D Routine" + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.553, + 0.34, + 0.562 + ], + "angle": 0, + "content": "\"Can we go 
to the dates?\"" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.565, + 0.34, + 0.624 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.254, + 0.625, + 0.391, + 0.634 + ], + "angle": 0, + "content": "\"And for dates do 3.17 to 3.21\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.396, + 0.553, + 0.514, + 0.562 + ], + "angle": 0, + "content": "\"Can we select April 7th?\"" + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.565, + 0.51, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.552, + 0.69, + 0.561 + ], + "angle": 0, + "content": "\"And then April 14th\"" + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.564, + 0.7, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.793, + 0.552, + 0.864, + 0.561 + ], + "angle": 0, + "content": "\"And hit done\"" + }, + { + "type": "image", + "bbox": [ + 0.772, + 0.563, + 0.892, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.586, + 0.201, + 0.618 + ], + "angle": 0, + "content": "Earlier task: select dates for a round-trip flight" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.647, + 0.217, + 0.667 + ], + "angle": 0, + "content": "Later task: select dates for a hotel stay" + }, + { + "type": "image", + "bbox": [ + 0.259, + 0.636, + 0.377, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.625, + 0.391, + 0.634 + ], + "angle": 0, + "content": "\"And for dates do 3.17 to 3.21\"" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.635, + 0.623, + 0.691 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.749, + 0.635, + 0.872, + 0.691 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.709, + 0.913, + 0.739 + ], + "angle": 0, + "content": "Figure 4: Key challenges introduced by 
RealWebAssist: (A) spatial reasoning, (B) temporal reasoning, (C) multi-step planning, and (D) learning user-specific routines." + }, + { + "type": "title", + "bbox": [ + 0.227, + 0.764, + 0.337, + 0.781 + ], + "angle": 0, + "content": "Experiments" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.785, + 0.16, + 0.799 + ], + "angle": 0, + "content": "Baselines" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.805, + 0.479, + 0.862 + ], + "angle": 0, + "content": "We evaluated several types of models for web agents commonly evaluated in existing web agent benchmarks that have real-world websites (i.e., offline evaluation). For all the experiments, we use the ground-truth captions for instructions." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.862, + 0.479, + 0.89 + ], + "angle": 0, + "content": "GUI Grounding Models. GUI grounding models directly translate an instruction to an action on a webpage. There are" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.765, + 0.914, + 0.877 + ], + "angle": 0, + "content": "two general types of grounding models. First, Set-of-Mark (SoM) (Yang et al. 2023) segments salient elements on a webpage using an off-the-shelf segmentation model (e.g., SAM (Kirillov et al. 2023) and Semantic-SAM (Li et al. 2023)) and prompts a VLM to select a segment mask to identify the clicking area corresponding to the given instruction. Second, VLMs finetuned on datasets with paired instructions and annotated clicking coordinates or bounding" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.084, + 0.069, + 0.479, + 0.125 + ], + "angle": 0, + "content": "boxes. We evaluated UGround-V1 (Gou et al. 2024), OSAtlas (Wu et al. 2024b), Aria-UI (Yang et al. 2024), GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2024a), and UI-TARS (Qin et al. 2025)." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.126, + 0.48, + 0.497 + ], + "angle": 0, + "content": "VLM/LRM + Grounding. 
Grounding models are designed or trained to ground a simple instruction to a webpage and thus tend to lack reasoning or planning capabilities. To address this, we leveraged VLMs and LRMs to first translate real user instructions to more understandable ones for grounding models. In particular, a VLM or an LRM needs to reason about the true user intent implied by the instruction and the spatial & temporal context. For instructions that require multiple actions, it needs to generate a plan to complete the instructions. Finally, it needs to generate a straightforward, clear instruction for the grounding model to produce the final action at each step. We evaluated state-of-the-art VLMs (OpenAI 2023; Team 2025; Qwen et al. 2025), as well as state-of-the-art LRMs (Jaech et al. 2024; Team 2025; Anthropic 2025). In the main results, we paired each VLM and LRM with the grounding model that achieved the highest step accuracy (GTA-1). For all VLMs and LRMs, we provide the past 10 steps for context, which we found to be a reasonable fixed context length in our preliminary study, balancing cost and informativeness. We also found that prompting models with screenshots of past webpages could incur a high cost. Therefore, we only prompt the models with the screenshot of the current webpage. For the history, we prompted GPT-4o to generate text-based action history based on consecutive screenshots and the instructions at each step. We then used this text-based history description for the evaluated VLMs and LRMs." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.499, + 0.48, + 0.665 + ], + "angle": 0, + "content": "Finetuning. To evaluate whether models can learn to better follow real-world user instructions with additional training, we finetuned the best-performing grounding model (GTA-1) following the model's original group relative policy optimization (GRPO) training procedure (Yang et al. 2025) on 9 participants' data and tested it on the held-out participants' instructions. 
Specifically, we trained the grounding model to produce an action based on the past 10 steps of actions (in text), the current webpage screenshot, and the instruction. We enumerated different train/test splits and reported the averaged performance, either using the finetuned model alone or pairing it with the best VLM or LRM." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.677, + 0.145, + 0.69 + ], + "angle": 0, + "content": "Results" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.695, + 0.481, + 0.89 + ], + "angle": 0, + "content": "Main results are summarized in Table 3. All models fell short in following real user instructions. The highest task success rate was only \\(14.0\\%\\) and the highest average progress was only \\(28.7\\%\\) a large gap compared to humans \\((93.4\\%)\\) task success rate). This difference has a \\(95\\%\\) confidence interval of [71.3, 87.5], and is highly significant with p-value \\(< 0.0001\\). Grounding methods by themselves failed to finish most tasks. However, when paired with the best-performing grounding model (GTA-1), instructions generated by VLMs & LRMs significantly improved the performance. LRMs performed marginally better than most VLMs. Across all three metrics, Gemini 2.5 Flash, Gemini 2.5 Pro, and o3 showed the strongest performance. Finetuning GTA-1 on real user data marginally improved its perfor" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.07, + 0.913, + 0.126 + ], + "angle": 0, + "content": "mance, but finetuning offered no benefit when GTA-1 was paired with VLMs and LRMs, since the finetuned model is trained to adapt to real users' instructions instead of instructions generated by VLM or LRM." + }, + { + "type": "title", + "bbox": [ + 0.669, + 0.138, + 0.762, + 0.152 + ], + "angle": 0, + "content": "Discussion" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.155, + 0.913, + 0.334 + ], + "angle": 0, + "content": "Can grounding models understand real-world user instructions? 
There remains a significant gap in the performance of current direct grounding methods. The best grounding model, GUI-Actor, has a task success rate of only \\(5.7\\%\\). Figure 5 illustrates various failure cases encountered when directly using GTA-1. Unsurprisingly, grounding models fail to interpret instructions requiring reasoning due to their limited reasoning capabilities. However, even for context-free instructions involving straightforward spatial reasoning—tasks where grounding methods should excel—they frequently misinterpret spatial layouts or rankings. For instance, they often incorrectly select elements for instructions such as \"click the first one.\"" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.335, + 0.913, + 0.501 + ], + "angle": 0, + "content": "How can VLMs & LRMs help? VLMs or LRMs can convert the original user instructions into more direct and explicit descriptions that a grounding model can more easily understand. This is made possible by their reasoning capacities. For instance, in Figure 5A, the grounding model (GTA-1) on its own fails to select the first tab: it selects the first element instead of the first tab. However, it succeeds after o3 rewrites the instruction to refer to the title. As shown in Figure 5B, grounding models may sometimes still fail due to inherent limitations even when VLMs/LRMs generate clearer instructions. Nonetheless, incorporating VLMs or LRMs significantly improves overall performance." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.502, + 0.914, + 0.75 + ], + "angle": 0, + "content": "What are the limitations of VLMs & LRMs? While VLMs and LRMs help, the highest task success rate is still only \\(14.0\\%\\). Beyond errors from grounding models (e.g., Figure 5B), they continue to struggle with complex temporal reasoning. In Figure 5C, the user previously asked to open the first two search results in new tabs. 
When later instructed to \"look at the first one we just opened,\" o3 failed to identify which element \"the first one\" referred to—instead of the first newly opened tab, it pointed to the first search result. We further analyze the error distribution between reasoning errors (the VLM/LRM mistranslates the instruction and refers to the wrong element) and grounding errors (the rewritten instruction is correct, but the grounding model still fails to click the right element). For the best model \\((\\mathrm{o}3 + \\mathrm{GTA} - 1)\\), \\(43.3\\%\\) of errors are grounding errors and \\(56.7\\%\\) are reasoning errors. This suggests that current VLMs and LRMs still lack the reasoning and planning abilities needed to robustly perform sequential instruction-following tasks." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.751, + 0.915, + 0.89 + ], + "angle": 0, + "content": "Does learning from real-world user data help? Finetuning GTA-1 marginally improved average progress and step accuracy but yielded no additional benefit when paired with VLMs and LRMs. These results show that the finetuned model better understands real user instructions, yet it still fails to generalize to instructions generated by VLMs and LRMs. The experiments suggest that finetuning grounding models on a small set of real user instructions provides minimal benefit, and collecting large-scale real user instructions remains a significant challenge." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.148, + 0.065, + 0.851, + 0.366 + ], + "angle": 0, + "content": "
CategoryModelTask SuccessProgressStep Accuracy
HumanHuman Operator93.496.499.2
GroundingSet-of-Mark0.02.729.8
OS-Atlas0.03.826.6
Aria-UI0.02.432.8
UGround-V10.06.247.7
UI-TARS2.813.153.8
GTA-13.717.761.5
GUI-Actor5.714.761.4
VLM + GroundingGPT-4o + GTA-18.423.572.7
Qwen 2.5 72B + GTA-19.324.369.0
Gemini 2.5 Flash + GTA-111.226.975.4
LRM + Groundingo1 + GTA-17.517.768.2
Gemini 2.5 Pro + GTA-18.423.574.5
o4-mini + GTA-110.321.767.1
Claude 3.7 Sonnet + GTA-112.126.768.8
o3 + GTA-114.028.776.7
FinetunedGTA-1-F3.7 (+0.0)19.7 (+2.0)64.3 (+2.8)
Gemini 2.5 Flash + GTA-1-F11.2 (+0.0)26.9 (+0.0)75.4 (+0.0)
o3 + GTA-1-F14.0 (+0.0)28.7 (+0.0)76.7 (+0.0)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.376, + 0.916, + 0.42 + ], + "angle": 0, + "content": "Table 2: Model Performance including task success rate, average progress, and step accuracy. All results are in %. The best performance of pretrained models and finetuned models is highlighted in bold. GTA-1-F indicates the finetuned GTA-1. Plus sign indicates the improvement compared to using the raw model for the same set of instructions." + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.453, + 0.325, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.327, + 0.453, + 0.547, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.453, + 0.892, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.689, + 0.913, + 0.72 + ], + "angle": 0, + "content": "Figure 5: Qualitative results. The captions show instructions generated by o3 (the best LRM). (A) Error corrected by using o3 to convert instructions. (B) Failure caused by GTA-1 when o3 reasons correctly. (C) Reasoning failure caused by o3." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.745, + 0.482, + 0.886 + ], + "angle": 0, + "content": "Limitations. RealWebAssist represents an important first step towards evaluating web agents on long-horizon, real-user tasks. However, it has several limitations. The first is participant scale and diversity. Collecting real-user data is expensive and time-consuming. The number of participants is comparable to prior works that use expert annotators (Lu, Kasner, and Reddy 2024). However, we intend to increase user diversity in future versions of the benchmark. We will also open-source our data collection tools for community expansion of the dataset. Second, like prior benchmarks on" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.745, + 0.915, + 0.871 + ], + "angle": 0, + "content": "real-world websites (Deng et al. 
2024; Cheng et al. 2024), we constrain our evaluation to an offline setting to ensure reproducibility and safety. This is complementary to benchmarks that focus on interactive evaluation in sandbox environments (e.g., WebArena). We believe that web agents should be evaluated on both types of benchmarks to fully assess their capabilities. Lastly, the current setting does not allow dialogue between a user and the AI assistant, which we will explore in future work." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.233, + 0.068, + 0.331, + 0.083 + ], + "angle": 0, + "content": "Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.087, + 0.481, + 0.312 + ], + "angle": 0, + "content": "In this paper, we present RealWebAssist, the first benchmark for evaluating web agents' ability to provide long-horizon web assistance with real-world users via sequential instruction-following. Our benchmark poses novel challenges, including spatial and temporal reasoning, planning, and adapting to user-specific routines. We conducted a comprehensive evaluation and analysis on multiple state-of-the-art GUI grounding models, VLMs, and LRMs, revealing critical limitations of them. We have also shown the limited benefit of finetuning models on real user data. Our benchmark, along with the well-annotated user instruction dataset, provides resources and diagnostic tools for further research on real-world web assistance. In future work, we plan to expand our human study to include more participants from various backgrounds, examine web assistance in interactive settings, and incorporate chat between users and web agents." + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.323, + 0.365, + 0.339 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.342, + 0.479, + 0.37 + ], + "angle": 0, + "content": "This work was supported by a research grant from Amazon. We thank Janice Chen for helpful discussions." 
+ }, + { + "type": "title", + "bbox": [ + 0.235, + 0.384, + 0.33, + 0.398 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.403, + 0.48, + 0.444 + ], + "angle": 0, + "content": "Anthropic. 2025. Claude 3.7 Sonnet and Claude Code. https://www.anthropic.com/news/claude-3-7-sonnet. Accessed: 2025-03-17." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.447, + 0.48, + 0.476 + ], + "angle": 0, + "content": "Cai, T.; Wang, X.; Ma, T.; Chen, X.; and Zhou, D. 2024. Large Language Models as Tool Makers. arXiv:2305.17126." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.479, + 0.48, + 0.521 + ], + "angle": 0, + "content": "Cheng, K.; Sun, Q.; Chu, Y.; Xu, F.; Li, Y.; Zhang, J.; and Wu, Z. 2024. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.524, + 0.48, + 0.579 + ], + "angle": 0, + "content": "Deng, X.; Gu, Y.; Zheng, B.; Chen, S.; Stevens, S.; Wang, B.; Sun, H.; and Su, Y. 2024. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.582, + 0.479, + 0.624 + ], + "angle": 0, + "content": "Fried, D.; Tomlin, N.; Hu, J.; Patel, R.; and Nematzadeh, A. 2023. Pragmatics in Language Grounding: Phenomena, Tasks, and Modeling Approaches. arXiv:2211.08371." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.627, + 0.48, + 0.668 + ], + "angle": 0, + "content": "Goodman, N. D.; and Frank, M. C. 2016. Pragmatic language interpretation as probabilistic inference. Trends in cognitive sciences, 20(11): 818-829." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.671, + 0.48, + 0.728 + ], + "angle": 0, + "content": "Gou, B.; Wang, R.; Zheng, B.; Xie, Y.; Chang, C.; Shu, Y.; Sun, H.; and Su, Y. 2024. Navigating the digital world as humans do: Universal visual grounding for gui agents. 
arXiv preprint arXiv:2410.05243." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.73, + 0.48, + 0.785 + ], + "angle": 0, + "content": "Gur, I.; Furuta, H.; Huang, A.; Safdari, M.; Matsuo, Y.; Eck, D.; and Faust, A. 2024. A Real-World WebAgent with Planning, Long Context Understanding, and Program Synthesis. arXiv:2307.12856." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.789, + 0.48, + 0.831 + ], + "angle": 0, + "content": "He, H.; Yao, W.; Ma, K.; Yu, W.; Dai, Y.; Zhang, H.; Lan, Z.; and Yu, D. 2024. WebVoyager: Building an End-to-End Web Agent with Large Multimodal Models. arXiv:2401.13919." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.834, + 0.48, + 0.889 + ], + "angle": 0, + "content": "Hong, W.; Wang, W.; Lv, Q.; Xu, J.; Yu, W.; Ji, J.; Wang, Y.; Wang, Z.; Zhang, Y.; Li, J.; Xu, B.; Dong, Y.; Ding, M.; and Tang, J. 2024. CogAgent: A Visual Language Model for GUI Agents. arXiv:2312.08914." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.403, + 0.48, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.07, + 0.913, + 0.139 + ], + "angle": 0, + "content": "Humphreys, P. C.; Raposo, D.; Pohlen, T.; Thornton, G.; Chhaparia, R.; Muldal, A.; Abramson, J.; Georgiev, P.; Santoro, A.; and Lillicrap, T. 2022. A data-driven approach for learning to control computers. In International Conference on Machine Learning, 9466-9482. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.142, + 0.914, + 0.197 + ], + "angle": 0, + "content": "Jaech, A.; Kalai, A.; Lerer, A.; Richardson, A.; El-Kishky, A.; Low, A.; Helyar, A.; Madry, A.; Beutel, A.; Carney, A.; et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.201, + 0.913, + 0.257 + ], + "angle": 0, + "content": "Jang, L.; Li, Y.; Zhao, D.; Ding, C.; Lin, J.; Liang, P. P.; Bonatti, R.; and Koishida, K. 2024. 
Videowebarena: Evaluating long context multimodal agents with video understanding web tasks. arXiv preprint arXiv:2410.19100." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.259, + 0.914, + 0.315 + ], + "angle": 0, + "content": "Kim, M.; Bursztyn, V.; Koh, E.; Guo, S.; and Hwang, S.-w. 2024. Rada: Retrieval-augmented web agent planning with llms. In Findings of the Association for Computational Linguistics ACL 2024, 13511-13525." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.318, + 0.914, + 0.373 + ], + "angle": 0, + "content": "Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; Dollar, P.; and Girshick, R. 2023. Segment Anything. arXiv:2304.02643." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.376, + 0.914, + 0.432 + ], + "angle": 0, + "content": "Li, F.; Zhang, H.; Sun, P.; Zou, X.; Liu, S.; Yang, J.; Li, C.; Zhang, L.; and Gao, J. 2023. Semantic-SAM: Segment and Recognize Anything at Any Granularity. arXiv preprint arXiv:2307.04767." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.435, + 0.913, + 0.478 + ], + "angle": 0, + "content": "Liu, E. Z.; Guu, K.; Pasupat, P.; Shi, T.; and Liang, P. 2018. Reinforcement learning on web interfaces using workflow-guided exploration. arXiv preprint arXiv:1802.08802." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.48, + 0.914, + 0.535 + ], + "angle": 0, + "content": "Liu, Z.; Yao, W.; Zhang, J.; Xue, L.; Heinecke, S.; Murthy, R.; Feng, Y.; Chen, Z.; Niebles, J. C.; Arpit, D.; et al. 2023. Bolaa: Benchmarking and orchestrating llm-augmented autonomous agents. arXiv preprint arXiv:2308.05960." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.538, + 0.914, + 0.58 + ], + "angle": 0, + "content": "Lü, X. H.; Kasner, Z.; and Reddy, S. 2024. Weblinx: Realworld website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.582, + 0.914, + 0.638 + ], + "angle": 0, + "content": "Majumder, B. P.; Mishra, B. D.; Jansen, P.; Tafjord, O.; Tandon, N.; Zhang, L.; Callison-Burch, C.; and Clark, P. 2023. CLIN: A Continually Learning Language Agent for Rapid Task Adaptation and Generalization. arXiv:2310.10134." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.641, + 0.914, + 0.683 + ], + "angle": 0, + "content": "McCarthy, W. P.; Hawkins, R. D.; Wang, H.; Holdaway, C.; and Fan, J. E. 2021. Learning to communicate about shared procedural abstractions. arXiv preprint arXiv:2107.00077." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.686, + 0.914, + 0.768 + ], + "angle": 0, + "content": "Nakano, R.; Hilton, J.; Balaji, S.; Wu, J.; Ouyang, L.; Kim, C.; Hesse, C.; Jain, S.; Kosaraju, V.; Saunders, W.; Jiang, X.; Cobbe, K.; Eloundou, T.; Krueger, G.; Button, K.; Knight, M.; Chess, B.; and Schulman, J. 2022. WebGPT: Browser-assisted question-answering with human feedback. arXiv:2112.09332." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.772, + 0.914, + 0.814 + ], + "angle": 0, + "content": "Nogueira, R.; and Cho, K. 2016. End-to-end goal-driven web navigation. Advances in neural information processing systems, 29." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.817, + 0.914, + 0.843 + ], + "angle": 0, + "content": "OpenAI. 2023. GPT-4 Technical Report. ArXiv, abs/2303.08774." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.847, + 0.914, + 0.89 + ], + "angle": 0, + "content": "Pan, J.; Zhang, Y.; Tomlin, N.; Zhou, Y.; Levine, S.; and Suhr, A. 2024. Autonomous Evaluation and Refinement of Digital Agents. arXiv:2404.06474." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.07, + 0.914, + 0.89 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.069, + 0.482, + 0.111 + ], + "angle": 0, + "content": "Pang, R. 
Y.; Yuan, W.; Cho, K.; He, H.; Sukhbaatar, S.; and Weston, J. 2024. Iterative Reasoning Preference Optimization. arXiv:2404.19733." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.116, + 0.482, + 0.173 + ], + "angle": 0, + "content": "Putta, P.; Mills, E.; Garg, N.; Motwani, S.; Finn, C.; Garg, D.; and Rafailov, R. 2024. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.176, + 0.482, + 0.234 + ], + "angle": 0, + "content": "Qin, Y.; Ye, Y.; Fang, J.; Wang, H.; Liang, S.; Tian, S.; Zhang, J.; Li, J.; Li, Y.; Huang, S.; et al. 2025. UI-TARS: Pioneering Automated GUI Interaction with Native Agents. arXiv preprint arXiv:2501.12326." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.238, + 0.482, + 0.35 + ], + "angle": 0, + "content": "Qwen;.; Yang, A.; Yang, B.; Zhang, B.; Hui, B.; Zheng, B.; Yu, B.; Li, C.; Liu, D.; Huang, F.; Wei, H.; Lin, H.; Yang, J.; Tu, J.; Zhang, J.; Yang, J.; Yang, J.; Zhou, J.; Lin, J.; Dang, K.; Lu, K.; Bao, K.; Yang, K.; Yu, L.; Li, M.; Xue, M.; Zhang, P.; Zhu, Q.; Men, R.; Lin, R.; Li, T.; Tang, T.; Xia, T.; Ren, X.; Ren, X.; Fan, Y.; Su, Y.; Zhang, Y.; Wan, Y.; Liu, Y.; Cui, Z.; Zhang, Z.; and Qiu, Z. 2025. Qwen2.5 Technical Report. arXiv:2412.15115." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.354, + 0.482, + 0.411 + ], + "angle": 0, + "content": "Radford, A.; Kim, J. W.; Xu, T.; Brockman, G.; McLeavey, C.; and Sutskever, I. 2023. Robust speech recognition via large-scale weak supervision. In International conference on machine learning, 28492-28518. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.415, + 0.482, + 0.471 + ], + "angle": 0, + "content": "Reddy, C. K.; Beyrami, E.; Pool, J.; Cutler, R.; Srinivasan, S.; and Gehrke, J. 2019. A scalable noisy speech dataset and online subjective test framework. arXiv preprint arXiv:1909.08050." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.476, + 0.482, + 0.533 + ], + "angle": 0, + "content": "Shi, T.; Karpathy, A.; Fan, L.; Hernandez, J.; and Liang, P. 2017. World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, 3135-3144. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.537, + 0.482, + 0.58 + ], + "angle": 0, + "content": "Song, Y.; Thai, K.; Pham, C. M.; Chang, Y.; Nadaf, M.; and Iyyer, M. 2025. Bearcubs: A benchmark for computer-using web agents. arXiv preprint arXiv:2503.07919." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.584, + 0.482, + 0.627 + ], + "angle": 0, + "content": "Team. 2025. Gemini 2.5: Pushing the Frontier with Advanced Reasoning, Multimodality, Long Context, and Next Generation Agentic Capabilities. arXiv:2507.06261." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.631, + 0.482, + 0.66 + ], + "angle": 0, + "content": "Wang, Z. Z.; Mao, J.; Fried, D.; and Neubig, G. 2024. Agent workflow memory. arXiv preprint arXiv:2409.07429." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.664, + 0.482, + 0.72 + ], + "angle": 0, + "content": "Wu, Q.; Cheng, K.; Yang, R.; Zhang, C.; Yang, J.; Jiang, H.; Mu, J.; Peng, B.; Qiao, B.; Tan, R.; et al. 2025. GUI-Actor: Coordinate-Free Visual Grounding for GUI Agents. arXiv preprint arXiv:2506.03143." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.725, + 0.482, + 0.768 + ], + "angle": 0, + "content": "Wu, Z.; Han, C.; Ding, Z.; Weng, Z.; Liu, Z.; Yao, S.; Yu, T.; and Kong, L. 2024a. OS-Copilot: Towards Generalist Computer Agents with Self-Improvement. arXiv:2402.07456." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.772, + 0.482, + 0.829 + ], + "angle": 0, + "content": "Wu, Z.; Wu, Z.; Xu, F.; Wang, Y.; Sun, Q.; Jia, C.; Cheng, K.; Ding, Z.; Chen, L.; Liang, P. P.; et al. 2024b. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.833, + 0.482, + 0.889 + ], + "angle": 0, + "content": "Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2024. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.069, + 0.482, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.069, + 0.914, + 0.125 + ], + "angle": 0, + "content": "Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2025. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.127, + 0.915, + 0.17 + ], + "angle": 0, + "content": "Yang, J.; Zhang, H.; Li, F.; Zou, X.; Li, C.; and Gao, J. 2023. Set-of-Mark Prompting Unleashes Extraordinary Visual Grounding in GPT-4V. arXiv preprint arXiv:2310.11441." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.173, + 0.915, + 0.215 + ], + "angle": 0, + "content": "Yang, Y.; Li, D.; Dai, Y.; Yang, Y.; Luo, Z.; Zhao, Z.; Hu, Z.; Huang, J.; Saha, A.; Chen, Z.; et al. 2025. GTA1: GUI Test-time Scaling Agent. arXiv preprint arXiv:2507.05791." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.218, + 0.915, + 0.26 + ], + "angle": 0, + "content": "Yang, Y.; Wang, Y.; Li, D.; Luo, Z.; Chen, B.; Huang, C.; and Li, J. 2024. Aria-UI: Visual Grounding for GUI Instructions. arXiv preprint arXiv:2412.16256." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.262, + 0.915, + 0.317 + ], + "angle": 0, + "content": "Yao, S.; Chen, H.; Yang, J.; and Narasimhan, K. 2022. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35: 20744-20757." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.32, + 0.915, + 0.363 + ], + "angle": 0, + "content": "Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2023. ReAct: Synergizing Reasoning and Acting in Language Models. arXiv:2210.03629." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.365, + 0.914, + 0.42 + ], + "angle": 0, + "content": "Ying, L.; Liu, J. X.; Aanya, S.; Fang, Y.; Tellex, S.; Tenenbaum, J. B.; and Shu, T. 2024. SIFToM: Robust Spoken Instruction Following through Theory of Mind. arXiv:2409.10849." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.423, + 0.914, + 0.479 + ], + "angle": 0, + "content": "Yuan, X.; Zhang, J.; Li, K.; Cai, Z.; Yao, L.; Chen, J.; Wang, E.; Hou, Q.; Chen, J.; Jiang, P.-T.; and Li, B. 2025. Enhancing Visual Grounding for GUI Agents via Self-Evolutionary Reinforcement Learning. arXiv:2505.12370." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.481, + 0.914, + 0.524 + ], + "angle": 0, + "content": "Zhang, C.; Yang, Z.; Liu, J.; Han, Y.; Chen, X.; Huang, Z.; Fu, B.; and Yu, G. 2023. AppAgent: Multimodal Agents as Smartphone Users. arXiv:2312.13771." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.526, + 0.914, + 0.556 + ], + "angle": 0, + "content": "Zhang, Z.; and Zhang, A. 2024. You Only Look at Screens: Multimodal Chain-of-Action Agents. arXiv:2309.11436." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.558, + 0.914, + 0.6 + ], + "angle": 0, + "content": "Zheng, B.; Gou, B.; Kil, J.; Sun, H.; and Su, Y. 2024a. Gpt-4v (ision) is a generalist web agent, if grounded. arXiv preprint arXiv:2401.01614." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.602, + 0.914, + 0.644 + ], + "angle": 0, + "content": "Zheng, L.; Wang, R.; Wang, X.; and An, B. 2024b. Synapse: Trajectory-as-Exemplar Prompting with Memory for Computer Control. arXiv:2306.07863." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.647, + 0.914, + 0.702 + ], + "angle": 0, + "content": "Zhou, A.; Yan, K.; Shlapentokh-Rothman, M.; Wang, H.; and Wang, Y.-X. 2024. Language Agent Tree Search Unifies Reasoning Acting and Planning in Language Models. arXiv:2310.04406." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.705, + 0.914, + 0.762 + ], + "angle": 0, + "content": "Zhou, S.; Xu, F. F.; Zhu, H.; Zhou, X.; Lo, R.; Sridhar, A.; Cheng, X.; Ou, T.; Bisk, Y.; Fried, D.; et al. 2023. Webarena: A realistic web environment for building autonomous agents. arXiv preprint arXiv:2307.13854." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.069, + 0.915, + 0.762 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.44, + 0.083, + 0.562, + 0.109 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.189, + 0.388, + 0.206 + ], + "angle": 0, + "content": "More experiment results" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.209, + 0.39, + 0.224 + ], + "angle": 0, + "content": "Full VLM & LRM + Grounding results" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.226, + 0.48, + 0.31 + ], + "angle": 0, + "content": "For the best three grounding models, GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2025) and UI-TARS (Qin et al. 2025), we test their pairing with all the VLMs and LRMs. Table 3 shows the full results. All the evaluation experiments are run on a single A100 GPU for 20 - 40 minutes. Finetuning GTA-1 model takes 4 hours on 4 A100 GPUs." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.321, + 0.406, + 0.337 + ], + "angle": 0, + "content": "Experiment with different context lengths" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.355, + 0.475, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.594, + 0.48, + 0.623 + ], + "angle": 0, + "content": "Figure 6: Effect of context length on Gemini 2.5 Flash + GTA-1." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.638, + 0.48, + 0.805 + ], + "angle": 0, + "content": "We evaluated the best-performing VLM (Gemini 2.5 Flash) + GTA-1 with varying history context lengths, from no history to 20 steps. An ideal assistant should be able to leverage different kinds of historical context based on different instructions, ranging from no history to multi-task history context (e.g., for routine learning). As shown in Figure 6, increasing context length also does not necessarily lead to better performance. Gemini 2.5 Flash + GTA-1 achieved the highest task success rate with a context length of 10, and increasing the context length further led to poorer performance. This suggest the limitation of VLM in effectively utilizing historical context for reasoning." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.816, + 0.364, + 0.832 + ], + "angle": 0, + "content": "Effect of Speech Recognition Errors" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.834, + 0.48, + 0.891 + ], + "angle": 0, + "content": "All baseline experiments use the ground truth transcripts of user speech instructions as input to ensure that performance is not affected by errors in speech-to-text transcription. However, in real-world settings, instructions are of-" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.19, + 0.913, + 0.468 + ], + "angle": 0, + "content": "ten given via speech. 
To reflect this, we evaluated the effect of speech recognition on the agent's performance by using the transcripts generated from a state-of-the-art automatic speech recognition (ASR) model, Whisper LargeV3 (Radford et al. 2023). Additionally, since users may not always be in quiet, controlled environments using a high-quality microphone like in our user experiment setup, we simulated noisy environments by injecting background noise with noise files from the Microsoft Scalable Noisy Speech Dataset (MS-SNSD) dataset (Reddy et al. 2019), following (Ying et al. 2024). The noise files include people talking in the background and keyboard typing sounds. As shown in Table 4, using speech recognition resulted in a \\(1.9\\%\\) drop in task success rate, and having noisy speech resulted in a further \\(1.9\\%\\) drop. In contrast, the word error rate (WER) of the ASR results increased from \\(1.4\\%\\) (original speech) to \\(28.1\\%\\) (noisy speech), a much larger performance drop compared to the final task performance. This result suggests that reasoning the true meanings of speech instructions by leveraging context can help mitigate errors from ASR." + }, + { + "type": "title", + "bbox": [ + 0.595, + 0.484, + 0.838, + 0.5 + ], + "angle": 0, + "content": "Dataset Construction Details" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.507, + 0.913, + 0.646 + ], + "angle": 0, + "content": "Video Segmenting. As shown in the video example, the interactive sessions are highly dynamic, and spoken instructions do not always align cleanly with specific screens or timesteps. Automatically segmenting instructions and matching them to corresponding webpages and actions using heuristics would risk significantly degrading data quality. Therefore, we manually segment the live sessions using video editing software to construct the final RealWebAssist dataset. All participants provided consent to have their speech recorded and included in this dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.519, + 0.647, + 0.913, + 0.758 + ], + "angle": 0, + "content": "Bounding Box Labeling. As shown in Figure 7, certain instructions like \"close all the tabs\" may correspond to multiple valid actions, since closing any of the tabs first would be reasonable. Therefore, we add bounding boxes to all of the elements that would be correct. The bounding boxes are drawn manually using a Python tool built with tkinter, and the clickable regions are determined by a visual inspection of the webpage." + }, + { + "type": "title", + "bbox": [ + 0.627, + 0.775, + 0.805, + 0.791 + ], + "angle": 0, + "content": "More Dataset Details" + }, + { + "type": "title", + "bbox": [ + 0.52, + 0.798, + 0.654, + 0.813 + ], + "angle": 0, + "content": "Evaluation detail" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.82, + 0.914, + 0.891 + ], + "angle": 0, + "content": "User instructions in RealWebAssist require different operations on the webpage, including clicking, scrolling and typing. We believe that action types other than clicking is trivial (for typing actions, the benchmark includes the step of finding the correct place to type instead of the actual typing" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.169, + 0.065, + 0.83, + 0.404 + ], + "angle": 0, + "content": "
VLM + GTA-1GPT-4o + GTA-18.423.572.7
Qwen 2.5 72B + GTA-19.324.369.0
Gemini 2.5 Flash + GTA-111.226.975.4
LRM + GTA-1Claude 3.7 Sonnet + GTA-112.126.768.8
Gemini 2.5 Pro + GTA-18.423.574.5
o1 + GTA-17.521.173.1
o3 + GTA-114.028.776.7
o4-mini + GTA-110.321.767.1
VLM + GUI-ACTORGPT-4o + GUI-Actor6.518.067.0
Qwen 2.5 72B + GUI-Actor9.321.464.9
Gemini 2.5 Flash + GUI-Actor10.325.673.1
LRM + GUI-ACTORClaude 3.7 Sonnet+ GUI-Actor7.518.563.9
Gemini 2.5 Pro + GUI-Actor9.324.073.2
o1 + GUI-Actor7.517.768.2
o3 + GUI-Actor12.127.474.0
o4-mini + GUI-Actor8.420.065.1
VLM + UI-TARSGPT-4o + UI-TARS6.520.867.3
Qwen 2.5 72B + UI-TARS7.521.863.2
Gemini 2.5 Flash + UI-TARS9.324.170.2
LRM + UI-TARSClaude 3.7 Sonnet + UI-TARS9.317.561.5
Gemini 2.5 Pro + UI-TARS7.523.471.6
o1 + UI-TARS6.518.566.0
o3 + UI-TARS12.127.272.4
o4-mini + UI-TARS7.519.462.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.414, + 0.913, + 0.445 + ], + "angle": 0, + "content": "Table 3: Model Performance for pairing GTA-1, GUI-Actor and UI-TARS with all LRMs & VLMs, including task success rate, average progress, and step accuracy. All results are in %." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.454, + 0.478, + 0.567 + ], + "angle": 0, + "content": "
Input TranscriptTask SuccessProgressStep Accuracy
Ground Truth10.321.766.4
Whisper Large-V38.420.965.5
Whisper Large-V3 (Noise)6.520.663.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.577, + 0.481, + 0.637 + ], + "angle": 0, + "content": "Table 4: Performance of GPT-4o + UGround-V1 using (1) ground-truth transcripts, (2) transcripts generated from original user speech by Whisper Large-V3, and (3) transcripts generated from noisy speech by Whisper Large-V3." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.661, + 0.481, + 0.748 + ], + "angle": 0, + "content": "process), so we only evaluate click-type actions with annotated bounding boxes are scored; instructions like \"scroll\" remain in the history but are not counted in our metrics. Of the 1,885 instructions, 1,412 are scored, yielding 1,714 evaluated action steps (one screenshot per step). Tasks average 17.6 evaluated steps." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.758, + 0.205, + 0.772 + ], + "angle": 0, + "content": "User behaviors" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.777, + 0.48, + 0.832 + ], + "angle": 0, + "content": "Figure 8 shows diverse user behaviors in RealWebAssist not present in previous benchmarks. We include a zip file of the live recordings (including audio) from which the examples are taken." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.833, + 0.48, + 0.875 + ], + "angle": 0, + "content": "Information seeking As Figure 8A shows, the user is seeking information from different aspects, like images and ratings, before they make the purchase decision." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.875, + 0.48, + 0.89 + ], + "angle": 0, + "content": "Comparing different options Figure 8B shows the process" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.457, + 0.913, + 0.484 + ], + "angle": 0, + "content": "of the user viewing two candidates and finally make the decision between them." 
+ }, + { + "type": "text", + "bbox": [ + 0.516, + 0.485, + 0.913, + 0.54 + ], + "angle": 0, + "content": "Changing minds In Figure 8C, the user is searching for some immersive dining experience. They are checking different restaurants and frequently change their minds when they see more options." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.54, + 0.913, + 0.596 + ], + "angle": 0, + "content": "Trial-and-error As Figure 8D shows, the user has several unsuccessful attempts when searching for men's fashion week. They refer to previous searches or initiate new ones to look for what they want." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.596, + 0.913, + 0.667 + ], + "angle": 0, + "content": "These diverse behaviors increase the complexity of the web assistance: instead of clearly defined-goals, the user themselves are also actively collecting knowledge to make decisions, which requires web assistant to follow the user's mind and act accordingly." + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.076, + 0.367, + 0.089 + ], + "angle": 0, + "content": "\"Close all the tabs\"" + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.09, + 0.898, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.27, + 0.913, + 0.314 + ], + "angle": 0, + "content": "Figure 7: Example of annotated bounding boxes for an instruction. The red boxes represent the correct bounding boxes. The user gave the instruction \"Close all the tabs\". For evaluation purposes, closing any of the tabs first is considered correct at each step, so all the x marks are labeled as correct at each step." 
+ }, + { + "type": "title", + "bbox": [ + 0.095, + 0.331, + 0.348, + 0.352 + ], + "angle": 0, + "content": "A Information seeking" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.355, + 0.918, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.466, + 0.442, + 0.487 + ], + "angle": 0, + "content": "B Comparing different options" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.489, + 0.917, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.616, + 0.301, + 0.637 + ], + "angle": 0, + "content": "C Changing minds" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.642, + 0.918, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.095, + 0.746, + 0.29, + 0.765 + ], + "angle": 0, + "content": "D Trial-and-error" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.77, + 0.895, + 0.862 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.3, + 0.881, + 0.696, + 0.897 + ], + "angle": 0, + "content": "Figure 8: Example of rich user behaviors in RealWebAssist." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.068, + 0.221, + 0.082 + ], + "angle": 0, + "content": "Full List of Tasks" + }, + { + "type": "title", + "bbox": [ + 0.164, + 0.102, + 0.313, + 0.117 + ], + "angle": 0, + "content": "Task # Description" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.122, + 0.632, + 0.137 + ], + "angle": 0, + "content": " 1 Buy a gift for each of my three friends with a budget of $100" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.137, + 0.734, + 0.151 + ], + "angle": 0, + "content": "2 Find and buy a birthday gift for a friend who loves tech, within a $50 budget." 
+ }, + { + "type": "text", + "bbox": [ + 0.201, + 0.151, + 0.601, + 0.164 + ], + "angle": 0, + "content": "3 Purchase a cute water bottle for everyday use, under $15" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.165, + 0.621, + 0.178 + ], + "angle": 0, + "content": "4 Compare different laptops and buy one with the best review" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.179, + 0.808, + 0.192 + ], + "angle": 0, + "content": "5 Purchase three home workout items under \\(75 and compare their reviews before buying." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.192, + 0.834, + 0.219 + ], + "angle": 0, + "content": " 6 Find and order a customized gift (e.g., engraved or personalized) for a friend's graduation under $60." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.22, + 0.806, + 0.234 + ], + "angle": 0, + "content": " 7 Order a complete warm and durable winter outfit (jacket, gloves, and boots) under $200." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.234, + 0.834, + 0.26 + ], + "angle": 0, + "content": "8 Get two sets of reusable grocery bags under \\(20 total, checking for durability and eco-friendliness." 
+ }, + { + "type": "text", + "bbox": [ + 0.198, + 0.261, + 0.834, + 0.276 + ], + "angle": 0, + "content": "9 Buy two wall paintings for a family house, one for a 13-year old boy, one for a 6-year old girl" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.276, + 0.664, + 0.289 + ], + "angle": 0, + "content": "10 Purchase a set of colorful coffee mugs under $20 with fun designs" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.29, + 0.789, + 0.304 + ], + "angle": 0, + "content": "11 Buy a small easy-care indoor plant under \\(15 and schedule delivery within three days" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.304, + 0.765, + 0.317 + ], + "angle": 0, + "content": "12 Get a colorful umbrella for under \\(30, making sure it's big enough for two people" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.317, + 0.834, + 0.344 + ], + "angle": 0, + "content": "13 Buy a set of scented candles under $25, ensuring they have good reviews for long-lasting fragrance." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.344, + 0.737, + 0.358 + ], + "angle": 0, + "content": "14 Find and purchase a durable phone case under $20 for an iPhone 14 Pro Max." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.359, + 0.71, + 0.372 + ], + "angle": 0, + "content": "15 Order a cozy throw blanket under \\(30, checking for softness and warmth." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.373, + 0.654, + 0.386 + ], + "angle": 0, + "content": "16 Buy a set of three face masks (reusable & breathable) under $15." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.387, + 0.744, + 0.4 + ], + "angle": 0, + "content": "17 Get a wireless Bluetooth speaker under \\(40 with good bass and waterproofing." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.401, + 0.816, + 0.414 + ], + "angle": 0, + "content": "18 Order a set of noise-canceling earplugs under $15, ensuring they're comfortable for sleep." 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.414, + 0.662, + 0.427 + ], + "angle": 0, + "content": "19 Find and buy a compact travel pillow and eye mask set under $30." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.428, + 0.679, + 0.442 + ], + "angle": 0, + "content": "20 Purchase a set of six kitchen towels under \\(20 with high absorbency." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.442, + 0.708, + 0.455 + ], + "angle": 0, + "content": "21 Buy an adjustable desk lamp under \\(35 with multiple brightness settings." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.455, + 0.739, + 0.469 + ], + "angle": 0, + "content": "22 Order a pack of 12 gel pens under \\(15 in assorted colors with smooth writing." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.47, + 0.778, + 0.483 + ], + "angle": 0, + "content": "23 Purchase a waterproof picnic blanket under \\(40, ensuring it's easy to fold and carry." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.484, + 0.696, + 0.497 + ], + "angle": 0, + "content": "24 Buy a cute yet professional notebook under \\(20 for journaling or work." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.497, + 0.834, + 0.511 + ], + "angle": 0, + "content": "25 Find and purchase a comfortable memory foam seat cushion under \\(35 for long sitting hours." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.511, + 0.626, + 0.525 + ], + "angle": 0, + "content": "26 Order a set of reusable silicone food storage bags under $25." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.525, + 0.827, + 0.539 + ], + "angle": 0, + "content": "27 Buy a pair of comfy indoor slippers under \\(30 with high reviews for warmth and durability." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.539, + 0.668, + 0.553 + ], + "angle": 0, + "content": "28 Purchase a portable mini humidifier under \\(40 with USB charging." 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.553, + 0.782, + 0.566 + ], + "angle": 0, + "content": "29 Order a stylish travel makeup bag under \\(25, ensuring it has multiple compartments." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.566, + 0.741, + 0.58 + ], + "angle": 0, + "content": "30 Find and order a surprise gift box for a friend who enjoys skincare, under $50." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.58, + 0.722, + 0.594 + ], + "angle": 0, + "content": "31 Compare wireless earbuds and purchase the best-reviewed pair under $100." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.594, + 0.785, + 0.607 + ], + "angle": 0, + "content": "32 Order a budget-friendly yet stylish smartwatch under \\( \\$ {75} \\) ,ensuring good battery life." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.608, + 0.834, + 0.634 + ], + "angle": 0, + "content": "33 Find and order a high-quality mechanical keyboard under $120, comparing typing feel and reviews" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.635, + 0.752, + 0.649 + ], + "angle": 0, + "content": "34 Find and buy a useful desk gadget under \\(40 for a friend who works from home" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.649, + 0.834, + 0.676 + ], + "angle": 0, + "content": "35 Plan flights for a trip from US to Europe (at least two different countries) for 3 days, comparing different airlines to find the best deal." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.677, + 0.834, + 0.703 + ], + "angle": 0, + "content": "36 Plan a 5-day trip to Japan, booking both flights and hotels, taking into account customer reviews." 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.704, + 0.834, + 0.732 + ], + "angle": 0, + "content": "37 Book a hotel for a weekend trip for a good price near the beach within the country, making sure you can cancel the trip at any time" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.733, + 0.834, + 0.759 + ], + "angle": 0, + "content": "38 Plan a spontaneous weekend trip to a destination with cheap last-minute flights and good hotel deals, for hotel make sure it's comfortable enough." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.76, + 0.834, + 0.786 + ], + "angle": 0, + "content": "39 Book a luxury hotel for a weekend at a city in the west US, pay attention to different services offered" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.787, + 0.785, + 0.801 + ], + "angle": 0, + "content": "40 Plan a three-stop European trip in a single week, with flights and hotel for each place" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.802, + 0.834, + 0.828 + ], + "angle": 0, + "content": "41 Book hotel for a family tour of four to a kid-friendly destination, with a hotel offering family amenities and breakfast included." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.829, + 0.834, + 0.856 + ], + "angle": 0, + "content": "42 Arrange a road trip across the US, booking rental cars and a mix of motels and boutique hotels along the route." 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.857, + 0.834, + 0.883 + ], + "angle": 0, + "content": "43 Book a romantic beach getaway in Hawaii for two people, make sure it's close to beach and have sea view" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.122, + 0.834, + 0.883 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.165, + 0.07, + 0.314, + 0.086 + ], + "angle": 0, + "content": "Task # Description" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.091, + 0.836, + 0.119 + ], + "angle": 0, + "content": "44 Plan a family Disney Cruise, securing flights to Port Canaveral and a hotel near the theme parks before sailing." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.119, + 0.834, + 0.147 + ], + "angle": 0, + "content": "45 Arrange a wine country getaway, booking flights to Napa Valley, a rental car, and a vineyard hotel with wine-tasting experiences." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.147, + 0.834, + 0.175 + ], + "angle": 0, + "content": "46 Find flights and a convertible rental car for a coastal drive in Hawaii, staying in beachfront resorts along the way." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.174, + 0.805, + 0.189 + ], + "angle": 0, + "content": "47 Choose flights to a popular ski destination and secure a lodge or hotel under \\(150/night." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.189, + 0.834, + 0.217 + ], + "angle": 0, + "content": "48 Book last-minute flights and a centrally located hotel in a major US city, focusing on deals under $100/night with great city landscape view." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.216, + 0.834, + 0.243 + ], + "angle": 0, + "content": "49 Secure round-trip flights to a scenic South American city and book a comfortable hotel near local attractions." 
+ }, + { + "type": "text", + "bbox": [ + 0.192, + 0.243, + 0.834, + 0.271 + ], + "angle": 0, + "content": "50 Pick flights from a major US airport to a warm city in Canada, with a hotel under $100/night in the downtown area." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.271, + 0.834, + 0.298 + ], + "angle": 0, + "content": "51 Schedule flights and a boutique hotel stay in a city rich in history, aiming for under $100/night in a central location." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.298, + 0.834, + 0.327 + ], + "angle": 0, + "content": "52 Arrange direct flights to a popular theme park region, booking a nearby hotel or hotel with easy transportation" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.327, + 0.834, + 0.354 + ], + "angle": 0, + "content": "53 Schedule flights for a quick visit to a popular national park, booking a nearby lodge or hotel with scenic views." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.354, + 0.834, + 0.382 + ], + "angle": 0, + "content": "54 Book round-trip flights to a major Middle Eastern city and reserve a modern hotel near historic sites for under $100/night" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.382, + 0.805, + 0.397 + ], + "angle": 0, + "content": "55 Secure flights from the US to a tropical island, choosing a resort that offers water sports" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.396, + 0.834, + 0.423 + ], + "angle": 0, + "content": "56 Find flights and a resort for a tropical vacation in Cancun, Mexico, focusing on all-inclusive options for relaxation" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.423, + 0.834, + 0.451 + ], + "angle": 0, + "content": "57 Book flights to Cairo for a 5-day trip, then pick a hotel with a direct view of the Pyramids and free breakfast included" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.451, + 0.834, + 0.479 + ], + "angle": 0, + "content": "58 Book a solo retreat to Kyoto, Japan, selecting a traditional ryokan stay with an 
onsen and authentic Japanese breakfast." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.479, + 0.672, + 0.493 + ], + "angle": 0, + "content": "59 Buy tickets for 2 people to an NBA Basketball game next weekend." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.493, + 0.834, + 0.52 + ], + "angle": 0, + "content": "60 Find and book tickets for a concert by a top artist in the nearest major city within the next three months." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.52, + 0.687, + 0.534 + ], + "angle": 0, + "content": "61 Search for a last-minute concert ticket and find the best available seat." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.534, + 0.665, + 0.548 + ], + "angle": 0, + "content": "62 Book 3 tickets for a rivalry match between two major sports teams" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.548, + 0.834, + 0.576 + ], + "angle": 0, + "content": "63 Book 3 tickets for a unique or unusual event, such as a drag show, wrestling match, or haunted experience" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.576, + 0.834, + 0.603 + ], + "angle": 0, + "content": "64 Purchase four tickets for a Broadway musical happening next month, aiming for orchestra seats if possible." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.603, + 0.6, + 0.618 + ], + "angle": 0, + "content": "65 Buy tickets for a family of 4 with 2 kids to a MLB game" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.618, + 0.834, + 0.645 + ], + "angle": 0, + "content": "66 Find and book tickets to a popular stand-up comedy show in a western big city for the upcoming weekend, prioritizing seats near the front." 
+ }, + { + "type": "text", + "bbox": [ + 0.192, + 0.645, + 0.771, + 0.659 + ], + "angle": 0, + "content": "67 Locate discounted tickets for a live theater performance in California this weekend" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.659, + 0.834, + 0.686 + ], + "angle": 0, + "content": "Search for an NFL game next month and buy two tickets in a mid-priced seating section for some eastern teams" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.686, + 0.834, + 0.714 + ], + "angle": 0, + "content": "69 Identify and reserve tickets for a children's matinee performance at a local venue, comparing any available family packages or group discounts." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.714, + 0.715, + 0.729 + ], + "angle": 0, + "content": "70 Secure seats for a must-see hockey match, comparing \"Best Seat\" options." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.729, + 0.834, + 0.756 + ], + "angle": 0, + "content": "71 Find tickets for a classical music or orchestra concert in the nearest major city next month, aiming for seats with a good view of the stage." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.756, + 0.834, + 0.783 + ], + "angle": 0, + "content": "72 Buy tickets for two people to an English Premier League soccer match in London city center next weekend." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.783, + 0.834, + 0.81 + ], + "angle": 0, + "content": "73 Find and purchase tickets to a major electronic music festival in Las Vegas within the next two months." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.81, + 0.834, + 0.839 + ], + "angle": 0, + "content": "74 Book seats for a stand-up comedy show in downtown Chicago next month, make sure the location is in city center." 
+ }, + { + "type": "text", + "bbox": [ + 0.192, + 0.839, + 0.834, + 0.868 + ], + "angle": 0, + "content": "75 Search for tickets to a top-tier cricket match in Sydney next month, aiming for seats that offer a good view of the pitch" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.867, + 0.731, + 0.881 + ], + "angle": 0, + "content": "76 Locate a family-friendly musical performance near your city for next month." + }, + { + "type": "list", + "bbox": [ + 0.192, + 0.091, + 0.836, + 0.881 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.165, + 0.069, + 0.313, + 0.086 + ], + "angle": 0, + "content": "Task # Description" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.091, + 0.836, + 0.118 + ], + "angle": 0, + "content": "77 Purchase two tickets to an upcoming rugby match in Dublin next month, making sure seats are in a central section and remain under." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.119, + 0.836, + 0.146 + ], + "angle": 0, + "content": "78 Find a highly rated ballet or opera production in Paris within the next two months, choose the seat in the second floor if available" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.147, + 0.814, + 0.161 + ], + "angle": 0, + "content": "79 Find tickets to a major fashion event, such as a runway show or fashion week experience." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.161, + 0.834, + 0.188 + ], + "angle": 0, + "content": "80 Look for tickets to a themed immersive dining experience (e.g., murder mystery dinner, fantasy-inspired restaurant)" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.189, + 0.771, + 0.202 + ], + "angle": 0, + "content": "81 Book tickets for UEFA soccer game between two Spanish teams for the next week" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.203, + 0.772, + 0.216 + ], + "angle": 0, + "content": "82 Book a ticket for a rooftop movie screening or outdoor film festival in a major city." 
+ }, + { + "type": "text", + "bbox": [ + 0.192, + 0.217, + 0.778, + 0.23 + ], + "angle": 0, + "content": "83 Find tickets for an esports event and compare standard vs. premium seating options." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.23, + 0.642, + 0.244 + ], + "angle": 0, + "content": "84 Book a ticket for a \"silent disco\" event in a city of your choice." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.244, + 0.836, + 0.271 + ], + "angle": 0, + "content": "85 secure two tickets to a major MLB game in a well-known ballpark anywhere in the U.S. next month, opting for seats along the first baseline." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.271, + 0.834, + 0.299 + ], + "angle": 0, + "content": "86 Find and book tickets for a large-scale country music festival occurring in the southern U.S. within the next two months, focusing on general admission passes." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.299, + 0.834, + 0.327 + ], + "angle": 0, + "content": "87 Purchase seats for a top-tier college football rivalry game taking place within the next six weeks, ensuring you can view the marching band's performance easily." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.327, + 0.834, + 0.354 + ], + "angle": 0, + "content": "88 Reserve tickets to a major NHL match in the next two months, choosing seats close to the ice." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.354, + 0.834, + 0.382 + ], + "angle": 0, + "content": "89 Book passes for a nationally touring art exhibition or immersive art experience within the next two months, ensuring weekend availability." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.382, + 0.834, + 0.41 + ], + "angle": 0, + "content": "90 Secure seats for a top-rated Broadway musical in New York City, making sure the date aligns with a Saturday evening performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.192, + 0.41, + 0.834, + 0.437 + ], + "angle": 0, + "content": "91 Reserve a spot for a special museum or cultural center night event (e.g., \"Night at the Museum\" or themed after-hours) in a major U.S. city within the next two months." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.438, + 0.715, + 0.452 + ], + "angle": 0, + "content": "92 Find the best deal on a new smartphone (latest model iPhone or Samsung)" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.452, + 0.609, + 0.466 + ], + "angle": 0, + "content": "93 Find the best dinner deal for two using food delivery apps" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.466, + 0.611, + 0.479 + ], + "angle": 0, + "content": "94 Purchase an outfit for a formal event within a $150 budget" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.48, + 0.545, + 0.493 + ], + "angle": 0, + "content": " 95 Buy a high-quality gaming chair for under $250" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.493, + 0.716, + 0.507 + ], + "angle": 0, + "content": "96 Find and book the best available concert tickets for a top artist in your city" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.507, + 0.765, + 0.521 + ], + "angle": 0, + "content": "97 Book tickets for a live theater performance and find a pre-show dinner reservation" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.521, + 0.594, + 0.535 + ], + "angle": 0, + "content": "98 Plan a sports game outing for two within a $150 budget" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.535, + 0.586, + 0.548 + ], + "angle": 0, + "content": "99 Plan a weekend getaway for two within a $500 budget" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.549, + 0.635, + 0.563 + ], + "angle": 0, + "content": "100 Organize a one-day itinerary for a solo traveler in a major city" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.563, + 0.545, + 0.577 + ], + "angle": 0, + "content": "101 Compare car rental options for a 
5-day road trip" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.577, + 0.651, + 0.59 + ], + "angle": 0, + "content": "102 Find and book a local escape room challenge for a group of four" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.59, + 0.585, + 0.603 + ], + "angle": 0, + "content": "103 Plan a movie night with discounted tickets and snacks" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.603, + 0.655, + 0.618 + ], + "angle": 0, + "content": "104 Find a highly-rated sushi restaurant and order a meal for delivery" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.618, + 0.602, + 0.631 + ], + "angle": 0, + "content": "105 Plan a surprise birthday dinner at a fine dining restaurant" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.631, + 0.538, + 0.645 + ], + "angle": 0, + "content": "106 Order a late-night snack under $15 for delivery" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.645, + 0.528, + 0.659 + ], + "angle": 0, + "content": "107 Book a luxury hotel staycation for a weekend" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.091, + 0.836, + 0.659 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.681, + 0.246, + 0.696 + ], + "angle": 0, + "content": "Full List of Websites" + }, + { + "type": "table", + "bbox": [ + 0.25, + 0.711, + 0.749, + 0.879 + ], + "angle": 0, + "content": "
NameURLTask Type
ACL Festivalaclfestival.comEntertainment
Amazonamazon.comShopping
Ammooraammoora.comEntertainment
Appleapple.comShopping
Artechouseartechouse.comEntertainment
Atom Ticketsatomtickets.comEntertainment
Best Buybestbuy.comShopping
Adidas Arenabilletterie.adidasarena.comEntertainment
Broadwaybroadway.comEntertainment
Charm City Clue Roomcharmcityclueroom.comEntertainment
City Passcitypass.comTravel Planning
" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.251, + 0.066, + 0.749, + 0.777 + ], + "angle": 0, + "content": "
NameURLTask Type
CN Towercntower.caTravel Planning
Colorado Tourismcolorado.comTravel Planning
Corsaircorsair.comShopping
Coupon Followcouponfollow.comShopping
Crave 4Dcrave4d.comEntertainment
Dine Immersivedineimmersive.comFood
Disney Cruisedisneycruise.disney.go.comTravel Planning
DoorDashdoordash.comFood
Drone and DSLRdroneandslr.comShopping
Enterpriseenterprise.comTravel Planning
ESChartsescharts.comEntertainment
ETIXetix.comEntertainment
Eventbriteeventbrite.comEntertainment
Expediaexpedia.comTravel Planning
Fashion Week Onlinefashionweekonline.comEntertainment
Fever Upfeverup.comEntertainment
Googlegoogle.comTravel Planning
Google Mapsgoogle.com/mapsTravel Planning
Live Nationlivenation.comEntertainment
Library of Congressloc.govTravel Planning
LoL Esportslolesports.comEntertainment
MLBmlb.comEntertainment
MLB Ticketsmlbtickets.comEntertainment
NYICFFnyicff.orgEntertainment
OpenTableopentable.comFood
Postmatespostmates.comFood
Rakutenrakuten.comShopping
Redditredgit.comEntertainment
Retail Me Notretailmenot.comShopping
Road Trip USAroadtripusa.comTravel Planning
Samsungsamsung.comShopping
San Lorenzo DCsanlorenzodc.comFood
Screen Dailyscreendaily.comEntertainment
Secret Baltimoresecretbaltimore.comTravel Planning
Secret Labsecretlab.coShopping
Smithsonian Sleepoverssmithsoniansleepovers.orgEntertainment
StubHubstubhub.comEntertainment
The Bureau Fashion Weekthebureaufashionweek.comEntertainment
The Meltdownthemeltdown.comEntertainment
The UFLtheufl.comEntertainment
Ticketmasterticketmaster.comEntertainment
Ticketmaster Franceticketmaster.frEntertainment
Ticket Webticketweb.comEntertainment
TickPicktickpick.comEntertainment
TripAdvisortripadvisor.comTravel Planning
Two Step Inntwostepinn.comEntertainment
Two Step Inn Frontgatetwostepinn.frontgatetickets.comEntertainment
Uberuber.comTravel Planning
Uber Eatsubereats.comFood
Viatorviator.comTravel Planning
Vivid Seatsvividseats.comEntertainment
Washington Tourismwashington.orgTravel Planning
Yelpyelp.comFood
Zarazara.comShopping
" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.085, + 0.069, + 0.218, + 0.085 + ], + "angle": 0, + "content": "Word Frequency" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.087, + 0.481, + 0.171 + ], + "angle": 0, + "content": "Figure 9 compares the most frequent instruction words in RealWebAssist with those from two common benchmarks, WebLINX and WebArena. The vocabulary used in RealWebAssist is more informal, as the dataset comes from natural spoken instructions. The tone is also more informal and conversational compared to WebLINX and WebArena." + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.192, + 0.462, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.375, + 0.48, + 0.417 + ], + "angle": 0, + "content": "Figure 9: Word Cloud of the most frequent words in RealWebAssist v.s. common benchmarks WebLINX and WebArena." + }, + { + "type": "title", + "bbox": [ + 0.146, + 0.446, + 0.419, + 0.465 + ], + "angle": 0, + "content": "Instructions for the participants" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.477, + 0.455, + 0.576 + ], + "angle": 0, + "content": "Thank you for participating in our study! You'll be guiding another person who is controlling the computer on your behalf. Imagine you are helping a friend navigate a website remotely, giving step-by-step instructions to complete a task. Feel free to interpret the task as you see fit. Here are some guidelines to keep in mind:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.579, + 0.454, + 0.607 + ], + "angle": 0, + "content": "- Give instructions as naturally as possible, just like you would in real life." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.611, + 0.454, + 0.638 + ], + "angle": 0, + "content": "- You don't have to be overly precise—say what feels natural." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.641, + 0.455, + 0.684 + ], + "angle": 0, + "content": "- You can only give one instruction at a time. 
After the operator follows your instruction, wait for them to complete it before giving the next step." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.686, + 0.455, + 0.728 + ], + "angle": 0, + "content": "- Keep your instructions clear and concise, but don't stress too much about exact wording—just say what comes to mind!" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.731, + 0.454, + 0.759 + ], + "angle": 0, + "content": "- You are allowed to instruct the operator to use Google to search for things." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.579, + 0.455, + 0.759 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.782, + 0.348, + 0.8 + ], + "angle": 0, + "content": "Video Example" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.806, + 0.454, + 0.835 + ], + "angle": 0, + "content": "A sample raw recording can be viewed via the link below (audio included)" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.847, + 0.3, + 0.863 + ], + "angle": 0, + "content": "https://youtu.be/CcyIt9tr5qo" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_origin.pdf b/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d547458689eded2fc8d560bc2c47fb7b105a6357 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/92de7eea-1f86-4346-b55b-8d273a167685_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70e7e44010cd2dbec1cd68a30f0cbda98db8362c70b0a69533bbc923807b470d +size 12756290 diff --git a/data/2025/2504_10xxx/2504.10445/full.md b/data/2025/2504_10xxx/2504.10445/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3b301288c46b14b9276204dcf2b2f8db7c13be59 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/full.md @@ -0,0 +1,544 @@ +# RealWebAssist: A Benchmark for Long-Horizon Web Assistance with 
Real-World Users + +Suyu Ye\*, Haojun Shi\*, Darren Shih 1, Hyokun Yun 2, Tanya G. Roosta 2, Tianmin Shu + +1Johns Hopkins University, + +2Amazon.com + +{sye10, hshi33, dshih5, tianmin.shu}@jhu.edu, {yunhyoku,troosta} $@$ amazon.com + +# Abstract + +To achieve successful assistance with long-horizon web-based tasks, AI agents must be able to sequentially follow real-world user instructions over a long period. Unlike existing web-based agent benchmarks, sequential instruction following in the real world poses significant challenges beyond performing a single, clearly defined task. For instance, real-world human instructions can be ambiguous, require different levels of AI assistance, and may evolve over time, reflecting changes in the user's mental state. To address this gap, we introduce RealWebAssist, a novel benchmark designed to evaluate sequential instruction-following in realistic scenarios involving long-horizon interactions with the web, visual GUI grounding, and understanding ambiguous real-world user instructions. RealWebAssist includes a dataset of sequential instructions collected from real-world human users. Each user instructs a web-based assistant to perform a series of tasks on multiple websites. A successful agent must reason about the true intent behind each instruction, keep track of the mental state of the user, understand user-specific routines, and ground the intended tasks to actions on the correct GUI elements. Our experimental results show that state-of-the-art models struggle to understand and ground user instructions, posing critical challenges in following real-world user instructions for long-horizon web assistance. + +# Introduction + +As an integral part of people's daily life, many of our everyday tasks are performed on the internet. 
With the tremendous advances in open-ended agents driven by large reasoning models (LRMs) and vision-language models (VLMs), there has been increasing interest in engineering web-based agents that can assist humans with complex tasks on the web following humans' instructions (Zheng et al. 2024a; Nakano et al. 2022). Recent works have demonstrated the promising performance of web-based agents on planning (Putta et al. 2024; Wang et al. 2024; Yao et al. 2023) and Graphical User Interface (GUI) grounding (Cheng et al. 2024; Wu et al. 2024b; Gou et al. 2024; Yang et al. 2024; Xu et al. 2024), across diverse websites, tasks, and GUI interfaces. + +Despite these encouraging results, there have not been systematic studies on long-horizon web assistance with real- + +world users. Existing benchmarks (e.g., (Zhou et al. 2023; Deng et al. 2024; Cheng et al. 2024; Yao et al. 2022; Jang et al. 2024)) typically focus on performing a task based on a single instruction. Additionally, the instructions in the current benchmarks were not collected from real users during natural web use sessions, lacking the realism of real user instructions. As a result, these benchmarks do not capture the full complexity of real users' web behavior and instructions. + +To bridge this gap, we propose RealWebAssist, the first sequential instruction following benchmark that evaluates long-horizon web assistance with real-world users. As illustrated in Figure 1, to perform a task, a user will instruct an AI assistant in a long sequence. Based on the past instructions and screenshots, the AI assistant must execute one or a few steps of actions to perform the latest instruction. Additionally, a user can engage in repeated interactions over a series of tasks with the assistant in a long session up to 40 minutes. To construct RealWebAssist, we recruited real users to instruct an assistant to perform multiple real-world tasks on the web. 
We created a large dataset with real user instructions (in both speech and text) for diverse real-world tasks and websites (as shown in Figure 2). + +The sequential instruction following tasks in our RealWebAssist benchmark reflect the natural human behavior on the web. First, real-world users may not initially know what they are looking for. Thus, they need to engage in information seeking on multiple web pages (e.g., step 1-2 in Figure 1), sometimes even across websites. Second, based on new information such as product reviews, users may change their minds (e.g., step 3). Third, users give simple instructions that are seemingly ambiguous out of the context but could be interpreted based on spatial and temporal context via pragmatic reasoning (Goodman and Frank 2016; Fried et al. 2023). For instance, the third instruction in Figure 1 does not explicitly describe which product, but an intelligent assistant should be able to infer the true user intent and correctly select the product in the user's mind. Lastly, in our benchmark, users can browse the websites and have the autonomy to make critical decisions (such as purchasing) on their own, which is complementary to existing benchmarks that focus on agents' planning ability to fully complete the tasks without human involvement. + +We systematically evaluate state-of-the-art models, including GUI grounding, VLMs, and large reasoning mod + +![](images/d264e9d9b78f0d24469e8645a6781a8b7b70be1f55ed0a83e526bba6feb6e03f.jpg) +Figure 1: An example sequential instruction following task with a real-world user. The red circles indicate the correct actions based on the user's spoken instructions. Sequential instructions introduce unique challenges, such as the need to retain and reason over past context. For instance, the instruction in step 3 requires information from step 1 to be correctly interpreted. 
+ +![](images/d2e93543deec6e7df545c85e4659b87e11f9d991874be90009730826ed4e310e.jpg) +Figure 2: Examples of general task categories (left) and websites visited (right) in RealWebAssist. The tasks span a wide range of real-world scenarios, from shopping to food & entertainment to travel planning, which encourages users to visit many different websites. + +els. Experimental results reveal that these models lack several key abilities, including grounding, understanding user intents, reasoning about spatial and temporal context, and adapting to user-specific routines. + +# Related Works + +Web Agent Benchmarks. Existing web agent benchmarks primarily evaluate the performance of web agents on tasks with clearly defined, unambiguous instructions, often overlooking the complexities of real-world users' behavior and their instructions to an AI assistant. On WebArena (Zhou et al. 2023), Mind2Web (Deng et al. 2024), and WebShop (Yao et al. 2022), an agent follows a single instruction to perform an isolated task. While they offer an evaluation of an agent's planning capacity, they lack the evaluation of an agent's ability to follow a long sequence of user instructions on long-horizon web tasks. There have also been GUI grounding benchmarks, such as ScreenSpot (Cheng et al. 2024), that focused on grounding simple instructions to clicking actions on webpages. These instructions only instruct web agents to click web elements rather than reaching a user goal (e.g., purchasing an item). WebLINX (Lü, Kasner, and Reddy 2024) features sequential instruction following. However, the instructions were generated by annotators who received detailed guidelines and extensive training, rather than by actual users. The resulting instructions do not capture the nuances and complexity of real-world user instructions that naturally emerge in interactions with an as- + +sistent. 
In contrast, RealWebAssist consists of sequential instruction following tasks for assisting real-world users, providing a novel set of challenges necessary for long-horizon web assistance for real-world users. Table 1 summarizes key differences between RealWebAssist and prior benchmarks. + +Autonomous Web Agents. There have been many recent works on engineering autonomous web agents through retrieval augmented planning (Kim et al. 2024; Zhou et al. 2024; Wu et al. 2024a; He et al. 2024; Pan et al. 2024), finetuning (Hong et al. 2024; Gur et al. 2024; Deng et al. 2024; Pang et al. 2024; Zhang and Zhang 2024), learning workflows (Zhang et al. 2023; Wang et al. 2024; Zheng et al. 2024b; Majumder et al. 2023; Cai et al. 2024), reinforcement learning (Liu et al. 2018; Shi et al. 2017; Nogueira and Cho 2016; Humphreys et al. 2022), and combinations of these methods (Liu et al. 2023; Putta et al. 2024). These works focus on planning for a single task. However, there has not been much work on understanding and following real-world users' sequential instructions on long-horizon tasks. + +GUI Grounding. One key ability for web agents in many assistance tasks is to ground instructions to clicking actions on a webpage. Recent works have explored VLM finetuning (e.g., (Gou et al. 2024; Wu et al. 2024b; Yang et al. 2024, 2025; Wu et al. 2025; Qin et al. 2025; Xu et al. 2025; Yuan et al. 2025)) as well as prompting pretrained VLMs with segmentations of web elements (e.g., (Yang et al. 2023)) for enabling GUI grounding. These methods generate coordinates or bounding boxes on webpages to indicate where to click. + +
BenchmarkReal UserSequential InstructionsReal WebsitesGUI GroundingSpeech# Instructions
SreenSpot (Cheng et al. 2024)XXX1200+
WebArena (Zhou et al. 2023)XXXXX812
Mind2Web (Deng et al. 2024)XXXX2000+
WebLINX (Lù, Kasner, and Reddy 2024)XXX512
VideoWebArena (Jang et al. 2024)XXXX2021
WebShop (Yao et al. 2022)XXXXX12087
BearCubs (Song et al. 2025)XXXX111
RealWebAssist (Ours)1885
+ +Table 1: Comparison between RealWebAssist and existing web agent benchmarks on several key aspects: (1) whether instructions were given by real-world users instead of annotators, (2) whether there is a sequence of instructions, (3) whether there are real-world websites, (4) whether the agent needs to execute actions by selecting coordinates on webpages, (5) whether the instructions are speech instructions, and (6) the number of total instructions. + +![](images/3262cf9379b85a21d2a4c5c9a55cea3280c0ffedb92bc61498969f0ae3157c4a.jpg) +"Ok, buy this item" + +![](images/5e15a46f4526020f333c8caa9d15eeec63cb8f6981f624f2ab01f59fc343101e.jpg) +"Let's do All Airports" +Figure 3: Multiple actions can satisfy a user's intent. A web agent's action is considered correct if the coordinate they provide is within one of the annotated correct regions. + +They have only been trained on low-level instructions that clearly refer to web elements. It remains unclear if they can understand real-world user instructions that must be interpreted considering context or may refer to high-level goals. + +# RealWebAssist Benchmark + +# Problem Setup + +RealWebAssist evaluates agents' ability to follow long-horizon, sequential web instructions to assist users with their high-level goals. In each task, a human user will try to reach an open-ended goal such as "buy formal outfits for a formal event" by instructing the assistant through a series of spoken instructions. The dataset is collected from interactions between human users and human assistants in a human experiment. To evaluate agents, we use the human assistants' actions to evaluate the agents' success. + +In RealWebAssist, a web agent has access to the current instruction, webpage (as a screenshot), and all the past interactions (previous instructions & screenshots of webpages). Since we are focusing on tasks on real-world websites, it is challenging to ensure safety and reproducibility in an interactive evaluation setting. 
Therefore, we adopt an offline evaluation setting following prior web-based agent benchmarks with real websites (Deng et al. 2024; Cheng et al. 2024). Specifically, for each instruction collected from the human experiment, the agent needs to identify the correct element to interact with by providing a coordinate or a bound + +ing box to click on the webpage. As shown by figure 3, a web agent's action is considered correct if the coordinate or the center of the bounding box they provide falls in the annotated correct regions on the webpage. If there are multiple steps corresponding to one instruction, we evaluate if the web agent's actions for the same instruction are all correct. + +# Evaluation Metrics + +We consider the following evaluation metrics: + +- Task success rate: A task is successful if the web agent can correctly produce actions for all instructions in a task. +- Average progress: We measure the progress of a task by the percentage of consecutive instructions the web agent can successfully perform before its first error in the task. +- Step success rate: We also consider a teacher forcing setting as a simpler, diagnostic evaluation, where the web agent will only need to follow the instruction at a single step of a task assuming all previous instructions have been successfully performed. + +# Dataset Construction + +Setup. We recruited 10 participants (4 female, 6 male, mean age = 20 years) from a US university campus, none of whom had prior knowledge of the study's purpose, to construct the dataset. All participants were native or fluent English speakers. Each participant completed a 40-minute real-world web assistance session in which they tackled a series of open-ended tasks designed to encourage diverse strategies. During each session, participants verbally instructed an experimenter, who operated the computer on their behalf, to complete the tasks. We captured screen recordings and used a high-quality USB microphone to record speech as raw data. 
The user study was approved by an institutional review board. + +User Tasks. To increase the instruction diversity and realism, participants received general web-based tasks requiring active information seeking, sub-goal planning, and comparison among various options. We generated the task list by few-shot prompting GPT-4o with open-ended tasks, followed by manual filtering and editing to ensure task quality and feasibility. These tasks provide only general guidance, + +ensuring flexibility for personal decision-making. Example tasks include "Purchase an outfit for a formal event" and "Plan a 5-day trip to Japan, booking both flights and hotels". Each user finishes about 10 tasks. + +Emergent User Behavior. In our realistic, open-ended settings, users exhibit rich behaviors that are not present in previous benchmarks. These include, but are not limited to, information seeking, researching and comparing different options, change of mind, and trial-and-error. + +Annotations. We manually labeled RealWebAssist data to ensure high-quality annotations. We first segmented the full recording into individual clips corresponding to each user's instructions. In our benchmark, we disregard user speech unrelated to explicit instructions for the assistant, such as filler words or verbalized thought processes. For each instruction, we provide raw speech, speech transcript, webpage, and the correct regions to click (in the form of one or more bounding boxes). When there were multiple correct answers for the instructions (for instance, "can you close all the current tabs"), we annotated all correct regions with multiple bounding boxes. When the experimenter made a mistake during the data collection sessions, we annotated the correct action intended by the user. If an instruction required multiple steps to complete, we set the instruction at each step as the same instruction. 
To generate the text instructions, we used an off-the-shelf recognition model, Whisper Large-V3 (Radford et al. 2023), to transcribe users' speech and then manually fixed transcription errors. For all the instructions, we have three annotators verifying all of them, ensuring $100\%$ agreement. + +Dataset Statistics. RealWebAssist contains 1,885 user instructions across 107 tasks, 66 websites, and 2,524 screenshots. In addition to the benchmark, we also plan to release the raw data, consisting of over 6 hours of video & audio. + +# Key Challenges + +RealWebAssist features multiple challenges as illustrated in Figure 4, including spatial and temporal reasoning needed to understand ambiguous and context-dependent user instructions, planning for multiple steps of actions to reach the goal communicated by an instruction, and learning about user-specific routines. These key challenges provide a more realistic and holistic evaluation of a web agent's reasoning, planning, and learning abilities to assist real-world users on long-horizon tasks. It is worth noting that many of these challenges, in particular, spatial reasoning, temporal reasoning, and routine understanding, are not present in existing web agent benchmarks. Unlike RealWebAssist, prior benchmarks, such as ScreenSpot (Cheng et al. 2024), WebArena (Zhou et al. 2023), and Mind2Web (Deng et al. 2024), only include clear, unambiguous, and non-sequential instructions. + +Spatial Reasoning. When referring to one of the elements on a webpage, real-world users tend to use a concise instruction that can be understood conditioned on spatial context instead of an overly elaborated instruction. For instance, when instructing an assistant to buy a product, users may give short instructions such as "select the cheapest one," instead of describing the desired product in detail. 
Figure 4A depicts different types of spatial reasoning that rely on di + +verse spatial contexts, including ranking, spatial relations, and overall website functionalities. It is worth noting that these instructions may sometimes reveal users' preferences (e.g., preferred seating), providing additional information for the web agent to provide potentially more customized assistance in the future. + +Temporal Reasoning. In our sequential instruction following tasks, users may instruct an assistant with the history as an assumed temporal context. For example, to understand the intended meaning of "click the last item," the assistant must memorize the items the user has viewed in the past. Figure 4B shows temporal reasoning based on different kinds of temporal context, ranging from short context between two consecutive webpages to long context with the same website to long context across websites. From the temporal context, the assistant needs to memorize crucial elements in the previous webpages, infer and track a user's mind (e.g., change of mind about what to buy) based on the past instructions and webpages, and identify the earlier webpage the user refers to. Such temporal reasoning has not been evaluated in prior web agent benchmarks. However, it is very common in our benchmark due to the nature of human web browsing behavior as well as human instructions guided by pragmatics (Goodman and Frank 2016). + +Multi-step Planning. Many instructions require multiple steps to complete. In these cases, the assistant needs to interpret the goal implied by the instruction and plan a sequence of actions to achieve that goal. This goes beyond grounding the instruction to a single action on the current webpage. Figure 4C shows an example where the agent was asked to repeat the same order on another food delivery website to check if the price would be different. 
A successful execution of this instruction would require the agent to first understand what the order is to ground the goal on the current website and generate a successful multi-step plan. + +Routine. Since our benchmark allows a user to engage in repeated interactions with an assistant over multiple tasks, we observe that users may define routines understood by the assistant after repeated interactions. As shown in Figure 4D, the user initially gave detailed step-by-step instructions when selecting arrival and departure dates for a flight. In a subsequent task, however, the user simplified them into a single instruction when selecting dates for a hotel room. Such shorter instructions become possible after establishing a routine in the earlier task. Cognitive studies found that procedural abstraction, like these routines, naturally emerges in human cooperative communication through repeated interactions, allowing more efficient communication with partners (McCarthy et al. 2021). The emergence of such routines in our benchmark poses a novel challenge for web agents—learning user-specific procedural abstraction via repeated interactions to achieve human-like adaptive assistance. We hypothesize that this ability could enhance users' perception of the AI assistant, as it understands human cooperative communication. + +# A Spatial Reasoning + +![](images/ba5e71d4116049f60b5bc9cd16dfe9a18244ef18f0bc79b68f33743507ec46fb.jpg) +"Can you click on the seventh tab?" 
+ +![](images/2eead0b82df55cb899f35efdc9503c8c8ebdfc8a7ecc837bd3f5b544ab79cf9d.jpg) +Ranking +"And let's just get the lowest price tickets" + +![](images/7a41bced840724c2c6501da63e165f94557a7e0f02f89d9734f7562716e63e71.jpg) +Spatial relations +"Can you click the arrow between the two" + +![](images/d855e2cf8493411bd282771d1d34a5bab6d962d3dd3397a281310a818ed5544c.jpg) +Only select the two seats on the top + +![](images/23a81543b1cddff082f6d87c6f256bc893c2a538cd4253b22058339db2d13e45.jpg) +Website functions +"Change the end date from 20 to 22nd" + +# B Temporal Reasoning + +![](images/63902a9f6317abf275642953e8b40809c46f12a3b365a3c43d271fe3a613ffd9.jpg) +Previous webpage +"Goto the previous tab" + +![](images/cb5d8e54ab382218c82963c419815a5545b40036eb8a04989b805fb5a8943dca.jpg) +"No, stay on that page" + +![](images/9a13fc77395a1b436583c1dc8a2907791f0c87b3cee6c2fd0a7775d0ee8bcce6.jpg) +Long context within the same website +"Click on HP laptop" +Long context across multiple websites + +![](images/3a00d16673abd9bdbb7dc446ffd3e9a5cef52dfef2decba6e9b846a3e3006f18.jpg) +"Can you check ASUS?" + +![](images/8f9ec67c1254812380c80b3c73fa1c66e3a246c33c0fee2617f7b7621f8fb749.jpg) +"Go back to the other laptop" + +![](images/62cf7409464e0df9224fbc514bcfada2521f09cde0a5801f98f118c975b7f24f.jpg) +"Can you look at the next tab as well?" + +![](images/e47eaaa5ae2da59bed97e795c4703cf3c7e3ddd2ecd60129652f86f4bc7e9674.jpg) + +![](images/304fb0a3a410fe3d67552f171bcbdaf6ecdaff35d16ea1f23c4ff48f91fa6670.jpg) +"Oh, this is like 95 bucks. Can you press the other tab" +CityPASS + +![](images/201d9e2f83e91b7ffbe99cf13a4b2f44ec488e6b23d6d455f891891130e95907.jpg) +"OK, can you open a new tab and search for ..." + +![](images/8546b193f6d0948328afa144cb4d9752a6d699f8d9b30d3fcd79c63fc862dbcf.jpg) + +![](images/1eed3cbba66d7499656e171b12493d30247b2ce027294fbe4eca315736ec9e3a.jpg) +"This is 36. 
Can you go back to CN Tower's official website" + +![](images/01d4f6a679a498b0e3393f2d373eb46be9fc4e2c1ef4a99929fdd0f7130bbbfd.jpg) + +![](images/723238ade76f571ce35b12a7b4568326c588d00e3cdcc48105287acc73f8c99e.jpg) +"I'd probably get the city pass option" + +![](images/1adfabb0cb91990ce5bddb0cf1cc7badf04e77434e247af1898187b45eb832b4.jpg) + +# C Multi-step planning + +![](images/866645a1921b87066ab8f5f51fe034ee26f062d6eabfb1a6d939229e7b2578e6.jpg) +"Can you go to DoorDash and order the same thing to compare the price?" +History (not shown here): The user previously ordered Snooze melt from Meltdown and selected French Fries + +![](images/4f1154756e2c1ea4c745646e35c604eedd93c1b1fcccdb866bdd547aa94e8897.jpg) + +![](images/c76f097e0d7a8be85904af9e0ba26cc8688b241d6345fd0e81205598c4cfec7a.jpg) + +![](images/0bda5b47032a5ddbecdab3f3d2d1a16c54e28d78bceb97b1b655b1b06d0f4b3a.jpg) + +![](images/b44e9d6483d35381371556f2f7874777315f6c1b282d7f8ece0c0bb172957c07.jpg) + +# D Routine + +![](images/13d42bd21ef1b84253b992a925229d3a9601fb7eb54415e8caeb8b799ee8b7b6.jpg) +"Can we go to the dates?" +"And for dates do 3.17 to 3.21" +Earlier task: select dates for a round-trip flight +Later task: select dates for a hotel stay + +![](images/b930b1c1eb8244d7e57f1f9e3f48151543e6440e4c9c22d1b7738667ce567590.jpg) +"Can we select April 7th?" + +![](images/8022060c99b56e9562de56293f5888d82c66f54c2b561b33fb7e638919355843.jpg) +"And then April 14th" + +![](images/9f7d7479bdb7dfe1cbc2f6af2434f79d35b23ee13e316a307b48b24a38f0979c.jpg) +"And hit done" + +![](images/3582dddae1e60403cc5a6b850ad94c9bf2fadd65918f9cdaa0af5f00f8e90161.jpg) +Figure 4: Key challenges introduced by RealWebAssist: (A) spatial reasoning, (B) temporal reasoning, (C) multi-step planning, and (D) learning user-specific routines. 
+ +"And for dates do 3.17 to 3.21" + +![](images/0ecfc95aeb49af0705291a27e30780180994aefb82a293996b90508495262b86.jpg) + +![](images/9c19821c5df1f6bd8f461904cde24337092636ed53ed5bafbef20c8cbd7b1197.jpg) + +# Experiments + +# Baselines + +We evaluated several types of models for web agents commonly evaluated in existing web agent benchmarks that have real-world websites (i.e., offline evaluation). For all the experiments, we use the ground-truth captions for instructions. + +GUI Grounding Models. GUI grounding models directly translate an instruction to an action on a webpage. There are + +two general types of grounding models. First, Set-of-Mark (SoM) (Yang et al. 2023) segments salient elements on a webpage using an off-the-shelf segmentation model (e.g., SAM (Kirillov et al. 2023) and Semantic-SAM (Li et al. 2023)) and prompts a VLM to select a segment mask to identify the clicking area corresponding to the given instruction. Second, VLMs finetuned on datasets with paired instructions and annotated clicking coordinates or bounding + +boxes. We evaluated UGround-V1 (Gou et al. 2024), OSAtlas (Wu et al. 2024b), Aria-UI (Yang et al. 2024), GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2024a), and UI-TARS (Qin et al. 2025). + +VLM/LRM + Grounding. Grounding models are designed or trained to ground a simple instruction to a webpage and thus tend to lack reasoning or planning capabilities. To address this, we leveraged VLMs and LRMs to first translate real user instructions to more understandable ones for grounding models. In particular, a VLM or an LRM needs to reason about the true user intent implied by the instruction and the spatial & temporal context. For instructions that require multiple actions, it needs to generate a plan to complete the instructions. Finally, it needs to generate a straightforward, clear instruction for the grounding model to produce the final action at each step. We evaluated state-of-the-art VLMs (OpenAI 2023; Team 2025; Qwen et al. 
2025), as well as state-of-the-art LRMs (Jaech et al. 2024; Team 2025; Anthropic 2025). In the main results, we paired each VLM and LRM with the grounding model that achieved the highest step accuracy (GTA-1). For all VLMs and LRMs, we provide the past 10 steps for context, which we found to be a reasonable fixed context length in our preliminary study, balancing cost and informativeness. We also found that prompting models with screenshots of past webpages could incur a high cost. Therefore, we only prompt the models with the screenshot of the current webpage. For the history, we prompted GPT-4o to generate text-based action history based on consecutive screenshots and the instructions at each step. We then used this text-based history description for the evaluated VLMs and LRMs. + +Finetuning. To evaluate whether models can learn to better follow real-world user instructions with additional training, we finetuned the best-performing grounding model (GTA-1) following the model's original group relative policy optimization (GRPO) training procedure (Yang et al. 2025) on 9 participants' data and tested it on the held-out participants' instructions. Specifically, we trained the grounding model to produce an action based on the past 10 steps of actions (in text), the current webpage screenshot, and the instruction. We enumerated different train/test splits and reported the averaged performance, either using the finetuned model alone or pairing it with the best VLM or LRM. + +# Results + +Main results are summarized in Table 3. All models fell short in following real user instructions. The highest task success rate was only $14.0\%$ and the highest average progress was only $28.7\%$ a large gap compared to humans $(93.4\%)$ task success rate). This difference has a $95\%$ confidence interval of [71.3, 87.5], and is highly significant with p-value $< 0.0001$ . Grounding methods by themselves failed to finish most tasks. 
However, when paired with the best-performing grounding model (GTA-1), instructions generated by VLMs & LRMs significantly improved the performance. LRMs performed marginally better than most VLMs. Across all three metrics, Gemini 2.5 Flash, Gemini 2.5 Pro, and o3 showed the strongest performance. Finetuning GTA-1 on real user data marginally improved its perfor + +mance, but finetuning offered no benefit when GTA-1 was paired with VLMs and LRMs, since the finetuned model is trained to adapt to real users' instructions instead of instructions generated by VLM or LRM. + +# Discussion + +Can grounding models understand real-world user instructions? There remains a significant gap in the performance of current direct grounding methods. The best grounding model, GUI-Actor, has a task success rate of only $5.7\%$ . Figure 5 illustrates various failure cases encountered when directly using GTA-1. Unsurprisingly, grounding models fail to interpret instructions requiring reasoning due to their limited reasoning capabilities. However, even for context-free instructions involving straightforward spatial reasoning—tasks where grounding methods should excel—they frequently misinterpret spatial layouts or rankings. For instance, they often incorrectly select elements for instructions such as "click the first one." + +How can VLMs & LRMs help? VLMs or LRMs can convert the original user instructions into more direct and explicit descriptions that a grounding model can more easily understand. This is made possible by their reasoning capacities. For instance, in Figure 5A, the grounding model (GTA-1) on its own fails to select the first tab: it selects the first element instead of the first tab. However, it succeeds after o3 rewrites the instruction to refer to the title. As shown in Figure 5B, grounding models may sometimes still fail due to inherent limitations even when VLMs/LRMs generate clearer instructions. 
Nonetheless, incorporating VLMs or LRMs significantly improves overall performance. + +What are the limitations of VLMs & LRMs? While VLMs and LRMs help, the highest task success rate is still only $14.0\%$ . Beyond errors from grounding models (e.g., Figure 5B), they continue to struggle with complex temporal reasoning. In Figure 5C, the user previously asked to open the first two search results in new tabs. When later instructed to "look at the first one we just opened," o3 failed to identify which element "the first one" referred to—instead of the first newly opened tab, it pointed to the first search result. We further analyze the error distribution between reasoning errors (the VLM/LRM mistranslates the instruction and refers to the wrong element) and grounding errors (the rewritten instruction is correct, but the grounding model still fails to click the right element). For the best model $(\mathrm{o}3 + \mathrm{GTA} - 1)$ , $43.3\%$ of errors are grounding errors and $56.7\%$ are reasoning errors. This suggests that current VLMs and LRMs still lack the reasoning and planning abilities needed to robustly perform sequential instruction-following tasks. + +Does learning from real-world user data help? Finetuning GTA-1 marginally improved average progress and step accuracy but yielded no additional benefit when paired with VLMs and LRMs. These results show that the finetuned model better understands real user instructions, yet it still fails to generalize to instructions generated by VLMs and LRMs. The experiments suggest that finetuning grounding models on a small set of real user instructions provides minimal benefit, and collecting large-scale real user instructions remains a significant challenge. + +
Category | Model | Task Success | Progress | Step Accuracy
Human | Human Operator | 93.4 | 96.4 | 99.2
Grounding | Set-of-Mark | 0.0 | 2.7 | 29.8
OS-Atlas | 0.0 | 3.8 | 26.6
Aria-UI | 0.0 | 2.4 | 32.8
UGround-V1 | 0.0 | 6.2 | 47.7
UI-TARS | 2.8 | 13.1 | 53.8
GTA-1 | 3.7 | 17.7 | 61.5
GUI-Actor | 5.7 | 14.7 | 61.4
VLM + Grounding | GPT-4o + GTA-1 | 8.4 | 23.5 | 72.7
Qwen 2.5 72B + GTA-1 | 9.3 | 24.3 | 69.0
Gemini 2.5 Flash + GTA-1 | 11.2 | 26.9 | 75.4
LRM + Grounding | o1 + GTA-1 | 7.5 | 17.7 | 68.2
Gemini 2.5 Pro + GTA-1 | 8.4 | 23.5 | 74.5
o4-mini + GTA-1 | 10.3 | 21.7 | 67.1
Claude 3.7 Sonnet + GTA-1 | 12.1 | 26.7 | 68.8
o3 + GTA-1 | 14.0 | 28.7 | 76.7
Finetuned | GTA-1-F | 3.7 (+0.0) | 19.7 (+2.0) | 64.3 (+2.8)
Gemini 2.5 Flash + GTA-1-F | 11.2 (+0.0) | 26.9 (+0.0) | 75.4 (+0.0)
o3 + GTA-1-F | 14.0 (+0.0) | 28.7 (+0.0) | 76.7 (+0.0)
+ +Table 2: Model Performance including task success rate, average progress, and step accuracy. All results are in %. The best performance of pretrained models and finetuned models is highlighted in bold. GTA-1-F indicates the finetuned GTA-1. Plus sign indicates the improvement compared to using the raw model for the same set of instructions. + +![](images/7efbfd1a507fb1217230f8a285f7a1064e7d85faba0ce51e3b238de71309c6dd.jpg) +Figure 5: Qualitative results. The captions show instructions generated by o3 (the best LRM). (A) Error corrected by using o3 to convert instructions. (B) Failure caused by GTA-1 when o3 reasons correctly. (C) Reasoning failure caused by o3. + +![](images/1560028b005585f00b7331b8ab6dda9bf201b15c7b0fadbaf391295da2699617.jpg) + +![](images/4d4d39ce25fbccf3bc1671ae26309bdc0979314bc11fc2de8468d2c3799b9ef5.jpg) + +Limitations. RealWebAssist represents an important first step towards evaluating web agents on long-horizon, real-user tasks. However, it has several limitations. The first is participant scale and diversity. Collecting real-user data is expensive and time-consuming. The number of participants is comparable to prior works that use expert annotators (Lu, Kasner, and Reddy 2024). However, we intend to increase user diversity in future versions of the benchmark. We will also open-source our data collection tools for community expansion of the dataset. Second, like prior benchmarks on + +real-world websites (Deng et al. 2024; Cheng et al. 2024), we constrain our evaluation to an offline setting to ensure reproducibility and safety. This is complementary to benchmarks that focus on interactive evaluation in sandbox environments (e.g., WebArena). We believe that web agents should be evaluated on both types of benchmarks to fully assess their capabilities. Lastly, the current setting does not allow dialogue between a user and the AI assistant, which we will explore in future work. 
+ +# Conclusion + +In this paper, we present RealWebAssist, the first benchmark for evaluating web agents' ability to provide long-horizon web assistance with real-world users via sequential instruction-following. Our benchmark poses novel challenges, including spatial and temporal reasoning, planning, and adapting to user-specific routines. We conducted a comprehensive evaluation and analysis on multiple state-of-the-art GUI grounding models, VLMs, and LRMs, revealing critical limitations of them. We have also shown the limited benefit of finetuning models on real user data. Our benchmark, along with the well-annotated user instruction dataset, provides resources and diagnostic tools for further research on real-world web assistance. In future work, we plan to expand our human study to include more participants from various backgrounds, examine web assistance in interactive settings, and incorporate chat between users and web agents. + +# Acknowledgements + +This work was supported by a research grant from Amazon. We thank Janice Chen for helpful discussions. + +# References + +Anthropic. 2025. Claude 3.7 Sonnet and Claude Code. https://www.anthropic.com/news/claudi-3-7-sonnet. Accessed: 2025-03-17. +Cai, T.; Wang, X.; Ma, T.; Chen, X.; and Zhou, D. 2024. Large Language Models as Tool Makers. arXiv:2305.17126. +Cheng, K.; Sun, Q.; Chu, Y.; Xu, F.; Li, Y.; Zhang, J.; and Wu, Z. 2024. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935. +Deng, X.; Gu, Y.; Zheng, B.; Chen, S.; Stevens, S.; Wang, B.; Sun, H.; and Su, Y. 2024. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36. +Fried, D.; Tomlin, N.; Hu, J.; Patel, R.; and Nematzadeh, A. 2023. Pragmatics in Language Grounding: Phenomena, Tasks, and Modeling Approaches. arXiv:2211.08371. +Goodman, N. D.; and Frank, M. C. 2016. Pragmatic language interpretation as probabilistic inference. 
Trends in cognitive sciences, 20(11): 818-829. +Gou, B.; Wang, R.; Zheng, B.; Xie, Y.; Chang, C.; Shu, Y.; Sun, H.; and Su, Y. 2024. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243. +Gur, I.; Furuta, H.; Huang, A.; Safdari, M.; Matsuo, Y.; Eck, D.; and Faust, A. 2024. A Real-World WebAgent with Planning, Long Context Understanding, and Program Synthesis. arXiv:2307.12856. +He, H.; Yao, W.; Ma, K.; Yu, W.; Dai, Y.; Zhang, H.; Lan, Z.; and Yu, D. 2024. WebVoyager: Building an End-to-End Web Agent with Large Multimodal Models. arXiv:2401.13919. +Hong, W.; Wang, W.; Lv, Q.; Xu, J.; Yu, W.; Ji, J.; Wang, Y.; Wang, Z.; Zhang, Y.; Li, J.; Xu, B.; Dong, Y.; Ding, M.; and Tang, J. 2024. CogAgent: A Visual Language Model for GUI Agents. arXiv:2312.08914. + +Humphreys, P. C.; Raposo, D.; Pohlen, T.; Thornton, G.; Chhaparia, R.; Muldal, A.; Abramson, J.; Georgiev, P.; Santoro, A.; and Lillicrap, T. 2022. A data-driven approach for learning to control computers. In International Conference on Machine Learning, 9466-9482. PMLR. +Jaech, A.; Kalai, A.; Lerer, A.; Richardson, A.; El-Kishky, A.; Low, A.; Helyar, A.; Madry, A.; Beutel, A.; Carney, A.; et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720. +Jang, L.; Li, Y.; Zhao, D.; Ding, C.; Lin, J.; Liang, P. P.; Bonatti, R.; and Koishida, K. 2024. Videowebarena: Evaluating long context multimodal agents with video understanding web tasks. arXiv preprint arXiv:2410.19100. +Kim, M.; Bursztyn, V.; Koh, E.; Guo, S.; and Hwang, S.-w. 2024. Rada: Retrieval-augmented web agent planning with llms. In Findings of the Association for Computational Linguistics ACL 2024, 13511-13525. +Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; Dollar, P.; and Girshick, R. 2023. Segment Anything. arXiv:2304.02643. 
+Li, F.; Zhang, H.; Sun, P.; Zou, X.; Liu, S.; Yang, J.; Li, C.; Zhang, L.; and Gao, J. 2023. Semantic-SAM: Segment and Recognize Anything at Any Granularity. arXiv preprint arXiv:2307.04767. +Liu, E. Z.; Guu, K.; Pasupat, P.; Shi, T.; and Liang, P. 2018. Reinforcement learning on web interfaces using workflow-guided exploration. arXiv preprint arXiv:1802.08802. +Liu, Z.; Yao, W.; Zhang, J.; Xue, L.; Heinecke, S.; Murthy, R.; Feng, Y.; Chen, Z.; Niebles, J. C.; Arpit, D.; et al. 2023. Bolaa: Benchmarking and orchestrating llm-augmented autonomous agents. arXiv preprint arXiv:2308.05960. +Lü, X. H.; Kasner, Z.; and Reddy, S. 2024. Weblinx: Realworld website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930. +Majumder, B. P.; Mishra, B. D.; Jansen, P.; Tafjord, O.; Tandon, N.; Zhang, L.; Callison-Burch, C.; and Clark, P. 2023. CLIN: A Continually Learning Language Agent for Rapid Task Adaptation and Generalization. arXiv:2310.10134. +McCarthy, W. P.; Hawkins, R. D.; Wang, H.; Holdaway, C.; and Fan, J. E. 2021. Learning to communicate about shared procedural abstractions. arXiv preprint arXiv:2107.00077. +Nakano, R.; Hilton, J.; Balaji, S.; Wu, J.; Ouyang, L.; Kim, C.; Hesse, C.; Jain, S.; Kosaraju, V.; Saunders, W.; Jiang, X.; Cobbe, K.; Eloundou, T.; Krueger, G.; Button, K.; Knight, M.; Chess, B.; and Schulman, J. 2022. WebGPT: Browser-assisted question-answering with human feedback. arXiv:2112.09332. +Nogueira, R.; and Cho, K. 2016. End-to-end goal-driven web navigation. Advances in neural information processing systems, 29. +OpenAI. 2023. GPT-4 Technical Report. ArXiv, abs/2303.08774. +Pan, J.; Zhang, Y.; Tomlin, N.; Zhou, Y.; Levine, S.; and Suhr, A. 2024. Autonomous Evaluation and Refinement of Digital Agents. arXiv:2404.06474. + +Pang, R. Y.; Yuan, W.; Cho, K.; He, H.; Sukhbaatar, S.; and Weston, J. 2024. Iterative Reasoning Preference Optimization. arXiv:2404.19733. 
+Putta, P.; Mills, E.; Garg, N.; Motwani, S.; Finn, C.; Garg, D.; and Rafailov, R. 2024. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199. +Qin, Y.; Ye, Y.; Fang, J.; Wang, H.; Liang, S.; Tian, S.; Zhang, J.; Li, J.; Li, Y.; Huang, S.; et al. 2025. UI-TARS: Pioneering Automated GUI Interaction with Native Agents. arXiv preprint arXiv:2501.12326. +Qwen;.; Yang, A.; Yang, B.; Zhang, B.; Hui, B.; Zheng, B.; Yu, B.; Li, C.; Liu, D.; Huang, F.; Wei, H.; Lin, H.; Yang, J.; Tu, J.; Zhang, J.; Yang, J.; Yang, J.; Zhou, J.; Lin, J.; Dang, K.; Lu, K.; Bao, K.; Yang, K.; Yu, L.; Li, M.; Xue, M.; Zhang, P.; Zhu, Q.; Men, R.; Lin, R.; Li, T.; Tang, T.; Xia, T.; Ren, X.; Ren, X.; Fan, Y.; Su, Y.; Zhang, Y.; Wan, Y.; Liu, Y.; Cui, Z.; Zhang, Z.; and Qiu, Z. 2025. Qwen2.5 Technical Report. arXiv:2412.15115. +Radford, A.; Kim, J. W.; Xu, T.; Brockman, G.; McLeavey, C.; and Sutskever, I. 2023. Robust speech recognition via large-scale weak supervision. In International conference on machine learning, 28492-28518. PMLR. +Reddy, C. K.; Beyrami, E.; Pool, J.; Cutler, R.; Srinivasan, S.; and Gehrke, J. 2019. A scalable noisy speech dataset and online subjective test framework. arXiv preprint arXiv:1909.08050. +Shi, T.; Karpathy, A.; Fan, L.; Hernandez, J.; and Liang, P. 2017. World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, 3135-3144. PMLR. +Song, Y.; Thai, K.; Pham, C. M.; Chang, Y.; Nadaf, M.; and Iyyer, M. 2025. Bearcubs: A benchmark for computer-using web agents. arXiv preprint arXiv:2503.07919. +Team. 2025. Gemini 2.5: Pushing the Frontier with Advanced Reasoning, Multimodality, Long Context, and Next Generation Agentic Capabilities. arXiv:2507.06261. +Wang, Z. Z.; Mao, J.; Fried, D.; and Neubig, G. 2024. Agent workflow memory. arXiv preprint arXiv:2409.07429. +Wu, Q.; Cheng, K.; Yang, R.; Zhang, C.; Yang, J.; Jiang, H.; Mu, J.; Peng, B.; Qiao, B.; Tan, R.; et al. 2025. 
GUI-Actor: Coordinate-Free Visual Grounding for GUI Agents. arXiv preprint arXiv:2506.03143. +Wu, Z.; Han, C.; Ding, Z.; Weng, Z.; Liu, Z.; Yao, S.; Yu, T.; and Kong, L. 2024a. OS-Copilot: Towards Generalist Computer Agents with Self-Improvement. arXiv:2402.07456. +Wu, Z.; Wu, Z.; Xu, F.; Wang, Y.; Sun, Q.; Jia, C.; Cheng, K.; Ding, Z.; Chen, L.; Liang, P. P.; et al. 2024b. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218. +Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2024. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454. + +Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2025. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454. +Yang, J.; Zhang, H.; Li, F.; Zou, X.; Li, C.; and Gao, J. 2023. Set-of-Mark Prompting Unleashes Extraordinary Visual Grounding in GPT-4V. arXiv preprint arXiv:2310.11441. +Yang, Y.; Li, D.; Dai, Y.; Yang, Y.; Luo, Z.; Zhao, Z.; Hu, Z.; Huang, J.; Saha, A.; Chen, Z.; et al. 2025. GTA1: GUI Test-time Scaling Agent. arXiv preprint arXiv:2507.05791. +Yang, Y.; Wang, Y.; Li, D.; Luo, Z.; Chen, B.; Huang, C.; and Li, J. 2024. Aria-UI: Visual Grounding for GUI Instructions. arXiv preprint arXiv:2412.16256. +Yao, S.; Chen, H.; Yang, J.; and Narasimhan, K. 2022. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35: 20744-20757. +Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2023. ReAct: Synergizing Reasoning and Acting in Language Models. arXiv:2210.03629. +Ying, L.; Liu, J. X.; Aanya, S.; Fang, Y.; Tellex, S.; Tenenbaum, J. B.; and Shu, T. 2024. SIFToM: Robust Spoken Instruction Following through Theory of Mind. arXiv:2409.10849. +Yuan, X.; Zhang, J.; Li, K.; Cai, Z.; Yao, L.; Chen, J.; Wang, E.; Hou, Q.; Chen, J.; Jiang, P.-T.; and Li, B. 
2025. Enhancing Visual Grounding for GUI Agents via Self-Evolutionary Reinforcement Learning. arXiv:2505.12370. +Zhang, C.; Yang, Z.; Liu, J.; Han, Y.; Chen, X.; Huang, Z.; Fu, B.; and Yu, G. 2023. AppAgent: Multimodal Agents as Smartphone Users. arXiv:2312.13771. +Zhang, Z.; and Zhang, A. 2024. You Only Look at Screens: Multimodal Chain-of-Action Agents. arXiv:2309.11436. +Zheng, B.; Gou, B.; Kil, J.; Sun, H.; and Su, Y. 2024a. Gpt-4v (ision) is a generalist web agent, if grounded. arXiv preprint arXiv:2401.01614. +Zheng, L.; Wang, R.; Wang, X.; and An, B. 2024b. Synapse: Trajectory-as-Exemplar Prompting with Memory for Computer Control. arXiv:2306.07863. +Zhou, A.; Yan, K.; Shlapentokh-Rothman, M.; Wang, H.; and Wang, Y.-X. 2024. Language Agent Tree Search Unifies Reasoning Acting and Planning in Language Models. arXiv:2310.04406. +Zhou, S.; Xu, F. F.; Zhu, H.; Zhou, X.; Lo, R.; Sridhar, A.; Cheng, X.; Ou, T.; Bisk, Y.; Fried, D.; et al. 2023. Webarena: A realistic web environment for building autonomous agents. arXiv preprint arXiv:2307.13854. + +# Appendix + +# More experiment results + +# Full VLM & LRM + Grounding results + +For the best three grounding models, GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2025) and UI-TARS (Qin et al. 2025), we test their pairing with all the VLMs and LRMs. Table 3 shows the full results. All the evaluation experiments are run on a single A100 GPU for 20 - 40 minutes. Finetuning GTA-1 model takes 4 hours on 4 A100 GPUs. + +![](images/95dcf2badd6229a6cb610ae8fd4f5db862d76b97acd78d06b8096a64989af287.jpg) +Experiment with different context lengths +Figure 6: Effect of context length on Gemini 2.5 Flash + GTA-1. + +We evaluated the best-performing VLM (Gemini 2.5 Flash) + GTA-1 with varying history context lengths, from no history to 20 steps. 
An ideal assistant should be able to leverage different kinds of historical context based on different instructions, ranging from no history to multi-task history context (e.g., for routine learning). As shown in Figure 6, increasing context length also does not necessarily lead to better performance. Gemini 2.5 Flash + GTA-1 achieved the highest task success rate with a context length of 10, and increasing the context length further led to poorer performance. This suggest the limitation of VLM in effectively utilizing historical context for reasoning. + +# Effect of Speech Recognition Errors + +All baseline experiments use the ground truth transcripts of user speech instructions as input to ensure that performance is not affected by errors in speech-to-text transcription. However, in real-world settings, instructions are of- + +ten given via speech. To reflect this, we evaluated the effect of speech recognition on the agent's performance by using the transcripts generated from a state-of-the-art automatic speech recognition (ASR) model, Whisper LargeV3 (Radford et al. 2023). Additionally, since users may not always be in quiet, controlled environments using a high-quality microphone like in our user experiment setup, we simulated noisy environments by injecting background noise with noise files from the Microsoft Scalable Noisy Speech Dataset (MS-SNSD) dataset (Reddy et al. 2019), following (Ying et al. 2024). The noise files include people talking in the background and keyboard typing sounds. As shown in Table 4, using speech recognition resulted in a $1.9\%$ drop in task success rate, and having noisy speech resulted in a further $1.9\%$ drop. In contrast, the word error rate (WER) of the ASR results increased from $1.4\%$ (original speech) to $28.1\%$ (noisy speech), a much larger performance drop compared to the final task performance. 
This result suggests that reasoning the true meanings of speech instructions by leveraging context can help mitigate errors from ASR. + +# Dataset Construction Details + +Video Segmenting. As shown in the video example, the interactive sessions are highly dynamic, and spoken instructions do not always align cleanly with specific screens or timesteps. Automatically segmenting instructions and matching them to corresponding webpages and actions using heuristics would risk significantly degrading data quality. Therefore, we manually segment the live sessions using video editing software to construct the final RealWebAssist dataset. All participants provided consent to have their speech recorded and included in this dataset. + +Bounding Box Labeling. As shown in Figure 7, certain instructions like "close all the tabs" may correspond to multiple valid actions, since closing any of the tabs first would be reasonable. Therefore, we add bounding boxes to all of the elements that would be correct. The bounding boxes are drawn manually using a Python tool built with tkinter, and the clickable regions are determined by a visual inspection of the webpage. + +# More Dataset Details + +# Evaluation detail + +User instructions in RealWebAssist require different operations on the webpage, including clicking, scrolling and typing. We believe that action types other than clicking is trivial (for typing actions, the benchmark includes the step of finding the correct place to type instead of the actual typing + +
VLM + GTA-1 | GPT-4o + GTA-1 | 8.4 | 23.5 | 72.7
Qwen 2.5 72B + GTA-1 | 9.3 | 24.3 | 69.0
Gemini 2.5 Flash + GTA-1 | 11.2 | 26.9 | 75.4
LRM + GTA-1 | Claude 3.7 Sonnet + GTA-1 | 12.1 | 26.7 | 68.8
Gemini 2.5 Pro + GTA-1 | 8.4 | 23.5 | 74.5
o1 + GTA-1 | 7.5 | 21.1 | 73.1
o3 + GTA-1 | 14.0 | 28.7 | 76.7
o4-mini + GTA-1 | 10.3 | 21.7 | 67.1
VLM + GUI-ACTOR | GPT-4o + GUI-Actor | 6.5 | 18.0 | 67.0
Qwen 2.5 72B + GUI-Actor | 9.3 | 21.4 | 64.9
Gemini 2.5 Flash + GUI-Actor | 10.3 | 25.6 | 73.1
LRM + GUI-ACTOR | Claude 3.7 Sonnet + GUI-Actor | 7.5 | 18.5 | 63.9
Gemini 2.5 Pro + GUI-Actor | 9.3 | 24.0 | 73.2
o1 + GUI-Actor | 7.5 | 17.7 | 68.2
o3 + GUI-Actor | 12.1 | 27.4 | 74.0
o4-mini + GUI-Actor | 8.4 | 20.0 | 65.1
VLM + UI-TARS | GPT-4o + UI-TARS | 6.5 | 20.8 | 67.3
Qwen 2.5 72B + UI-TARS | 7.5 | 21.8 | 63.2
Gemini 2.5 Flash + UI-TARS | 9.3 | 24.1 | 70.2
LRM + UI-TARS | Claude 3.7 Sonnet + UI-TARS | 9.3 | 17.5 | 61.5
Gemini 2.5 Pro + UI-TARS | 7.5 | 23.4 | 71.6
o1 + UI-TARS | 6.5 | 18.5 | 66.0
o3 + UI-TARS | 12.1 | 27.2 | 72.4
o4-mini + UI-TARS | 7.5 | 19.4 | 62.5
+ +Table 3: Model Performance for pairing GTA-1, GUI-Actor and UI-TARS with all LRMs & VLMs, including task success rate, average progress, and step accuracy. All results are in %. + +
Input Transcript | Task Success | Progress | Step Accuracy
Ground Truth | 10.3 | 21.7 | 66.4
Whisper Large-V3 | 8.4 | 20.9 | 65.5
Whisper Large-V3 (Noise) | 6.5 | 20.6 | 63.4
+ +Table 4: Performance of GPT-4o + UGround-V1 using (1) ground-truth transcripts, (2) transcripts generated from original user speech by Whisper Large-V3, and (3) transcripts generated from noisy speech by Whisper Large-V3. + +process), so we only evaluate click-type actions with annotated bounding boxes are scored; instructions like "scroll" remain in the history but are not counted in our metrics. Of the 1,885 instructions, 1,412 are scored, yielding 1,714 evaluated action steps (one screenshot per step). Tasks average 17.6 evaluated steps. + +# User behaviors + +Figure 8 shows diverse user behaviors in RealWebAssist not present in previous benchmarks. We include a zip file of the live recordings (including audio) from which the examples are taken. + +Information seeking As Figure 8A shows, the user is seeking information from different aspects, like images and ratings, before they make the purchase decision. + +Comparing different options Figure 8B shows the process + +of the user viewing two candidates and finally make the decision between them. + +Changing minds In Figure 8C, the user is searching for some immersive dining experience. They are checking different restaurants and frequently change their minds when they see more options. + +Trial-and-error As Figure 8D shows, the user has several unsuccessful attempts when searching for men's fashion week. They refer to previous searches or initiate new ones to look for what they want. + +These diverse behaviors increase the complexity of the web assistance: instead of clearly defined-goals, the user themselves are also actively collecting knowledge to make decisions, which requires web assistant to follow the user's mind and act accordingly. 
+ +![](images/85af84f589e6a25c2962527918386f0b16a85e37584505f52029ff88a2d1a816.jpg) +"Close all the tabs" + +# A Information seeking + +![](images/461528e1a0f26a59ab974cdb764d3878636aa1d9dbf8cf802bb648c7768dafcf.jpg) +Figure 7: Example of annotated bounding boxes for an instruction. The red boxes represent the correct bounding boxes. The user gave the instruction "Close all the tabs". For evaluation purposes, closing any of the tabs first is considered correct at each step, so all the x marks are labeled as correct at each step. + +# B Comparing different options + +![](images/d437676042ad38ac634c0d36d327cc10e98ca3bb9a29f7c189a150c8f1375f3c.jpg) + +# C Changing minds + +![](images/2804f0236f67ed074ccb568daf1db5e3988534a2f55bf4a59dc12969c3573f75.jpg) + +# D Trial-and-error + +![](images/8bfb0f9626c848519a4275d943677e17f7c84cbd0ee00c672e1e4d2e7106f0da.jpg) +Figure 8: Example of rich user behaviors in RealWebAssist. + +# Task # Description + +1 Buy a gift for each of my three friends with a budget of $100 +2 Find and buy a birthday gift for a friend who loves tech, within a $50 budget. +3 Purchase a cute water bottle for everyday use, under $15 +4 Compare different laptops and buy one with the best review +5 Purchase three home workout items under $75 and compare their reviews before buying. + 6 Find and order a customized gift (e.g., engraved or personalized) for a friend's graduation under $60. + 7 Order a complete warm and durable winter outfit (jacket, gloves, and boots) under $200. +8 Get two sets of reusable grocery bags under $20 total, checking for durability and eco-friendliness. 
+9 Buy two wall paintings for a family house, one for a 13-year old boy, one for a 6-year old girl +10 Purchase a set of colorful coffee mugs under $20 with fun designs +11 Buy a small easy-care indoor plant under $15 and schedule delivery within three days +12 Get a colorful umbrella for under $30, making sure it's big enough for two people +13 Buy a set of scented candles under $25, ensuring they have good reviews for long-lasting fragrance. +14 Find and purchase a durable phone case under $20 for an iPhone 14 Pro Max. +15 Order a cozy throw blanket under $30, checking for softness and warmth. +16 Buy a set of three face masks (reusable & breathable) under $15. +17 Get a wireless Bluetooth speaker under $40 with good bass and waterproofing. +18 Order a set of noise-canceling earplugs under $15, ensuring they're comfortable for sleep. +19 Find and buy a compact travel pillow and eye mask set under $30. +20 Purchase a set of six kitchen towels under $20 with high absorbency. +21 Buy an adjustable desk lamp under $35 with multiple brightness settings. +22 Order a pack of 12 gel pens under $15 in assorted colors with smooth writing. +23 Purchase a waterproof picnic blanket under $40, ensuring it's easy to fold and carry. +24 Buy a cute yet professional notebook under $20 for journaling or work. +25 Find and purchase a comfortable memory foam seat cushion under $35 for long sitting hours. +26 Order a set of reusable silicone food storage bags under $25. +27 Buy a pair of comfy indoor slippers under $30 with high reviews for warmth and durability. +28 Purchase a portable mini humidifier under $40 with USB charging. +29 Order a stylish travel makeup bag under $25, ensuring it has multiple compartments. +30 Find and order a surprise gift box for a friend who enjoys skincare, under $50. +31 Compare wireless earbuds and purchase the best-reviewed pair under $100. +32 Order a budget-friendly yet stylish smartwatch under $75, ensuring good battery life. 
+33 Find and order a high-quality mechanical keyboard under $120, comparing typing feel and reviews +34 Find and buy a useful desk gadget under $40 for a friend who works from home +35 Plan flights for a trip from US to Europe (at least two different countries) for 3 days, comparing different airlines to find the best deal. +36 Plan a 5-day trip to Japan, booking both flights and hotels, taking into account customer reviews. +37 Book a hotel for a weekend trip for a good price near the beach within the country, making sure you can cancel the trip at any time +38 Plan a spontaneous weekend trip to a destination with cheap last-minute flights and good hotel deals, for hotel make sure it's comfortable enough. +39 Book a luxury hotel for a weekend at a city in the west US, pay attention to different services offered +40 Plan a three-stop European trip in a single week, with flights and hotel for each place +41 Book hotel for a family tour of four to a kid-friendly destination, with a hotel offering family amenities and breakfast included. +42 Arrange a road trip across the US, booking rental cars and a mix of motels and boutique hotels along the route. +43 Book a romantic beach getaway in Hawaii for two people, make sure it's close to beach and have sea view + +# Task # Description + +44 Plan a family Disney Cruise, securing flights to Port Canaveral and a hotel near the theme parks before sailing. +45 Arrange a wine country getaway, booking flights to Napa Valley, a rental car, and a vineyard hotel with wine-tasting experiences. +46 Find flights and a convertible rental car for a coastal drive in Hawaii, staying in beachfront resorts along the way. +47 Choose flights to a popular ski destination and secure a lodge or hotel under $150/night. +48 Book last-minute flights and a centrally located hotel in a major US city, focusing on deals under $100/night with great city landscape view. 
+49 Secure round-trip flights to a scenic South American city and book a comfortable hotel near local attractions. +50 Pick flights from a major US airport to a warm city in Canada, with a hotel under $100/night in the downtown area. +51 Schedule flights and a boutique hotel stay in a city rich in history, aiming for under $100/night in a central location. +52 Arrange direct flights to a popular theme park region, booking a nearby hotel or hotel with easy transportation +53 Schedule flights for a quick visit to a popular national park, booking a nearby lodge or hotel with scenic views. +54 Book round-trip flights to a major Middle Eastern city and reserve a modern hotel near historic sites for under $100/night +55 Secure flights from the US to a tropical island, choosing a resort that offers water sports +56 Find flights and a resort for a tropical vacation in Cancun, Mexico, focusing on all-inclusive options for relaxation +57 Book flights to Cairo for a 5-day trip, then pick a hotel with a direct view of the Pyramids and free breakfast included +58 Book a solo retreat to Kyoto, Japan, selecting a traditional ryokan stay with an onsen and authentic Japanese breakfast. +59 Buy tickets for 2 people to an NBA Basketball game next weekend. +60 Find and book tickets for a concert by a top artist in the nearest major city within the next three months. +61 Search for a last-minute concert ticket and find the best available seat. +62 Book 3 tickets for a rivalry match between two major sports teams +63 Book 3 tickets for a unique or unusual event, such as a drag show, wrestling match, or haunted experience +64 Purchase four tickets for a Broadway musical happening next month, aiming for orchestra seats if possible. +65 Buy tickets for a family of 4 with 2 kids to a MLB game +66 Find and book tickets to a popular stand-up comedy show in a western big city for the upcoming weekend, prioritizing seats near the front. 
+67 Locate discounted tickets for a live theater performance in California this weekend +68 Search for an NFL game next month and buy two tickets in a mid-priced seating section for some eastern teams +69 Identify and reserve tickets for a children's matinee performance at a local venue, comparing any available family packages or group discounts. +70 Secure seats for a must-see hockey match, comparing "Best Seat" options. +71 Find tickets for a classical music or orchestra concert in the nearest major city next month, aiming for seats with a good view of the stage. +72 Buy tickets for two people to an English Premier League soccer match in London city center next weekend. +73 Find and purchase tickets to a major electronic music festival in Las Vegas within the next two months. +74 Book seats for a stand-up comedy show in downtown Chicago next month, make sure the location is in city center. +75 Search for tickets to a top-tier cricket match in Sydney next month, aiming for seats that offer a good view of the pitch +76 Locate a family-friendly musical performance near your city for next month. + +# Task # Description + +77 Purchase two tickets to an upcoming rugby match in Dublin next month, making sure seats are in a central section and remain under. +78 Find a highly rated ballet or opera production in Paris within the next two months, choose the seat in the second floor if available +79 Find tickets to a major fashion event, such as a runway show or fashion week experience. +80 Look for tickets to a themed immersive dining experience (e.g., murder mystery dinner, fantasy-inspired restaurant) +81 Book tickets for UEFA soccer game between two Spanish teams for the next week +82 Book a ticket for a rooftop movie screening or outdoor film festival in a major city. +83 Find tickets for an esports event and compare standard vs. premium seating options. +84 Book a ticket for a "silent disco" event in a city of your choice. 
+85 Secure two tickets to a major MLB game in a well-known ballpark anywhere in the U.S. next month, opting for seats along the first baseline. +86 Find and book tickets for a large-scale country music festival occurring in the southern U.S. within the next two months, focusing on general admission passes. +87 Purchase seats for a top-tier college football rivalry game taking place within the next six weeks, ensuring you can view the marching band's performance easily. +88 Reserve tickets to a major NHL match in the next two months, choosing seats close to the ice. +89 Book passes for a nationally touring art exhibition or immersive art experience within the next two months, ensuring weekend availability. +90 Secure seats for a top-rated Broadway musical in New York City, making sure the date aligns with a Saturday evening performance. +91 Reserve a spot for a special museum or cultural center night event (e.g., "Night at the Museum" or themed after-hours) in a major U.S. city within the next two months. 
+92 Find the best deal on a new smartphone (latest model iPhone or Samsung) +93 Find the best dinner deal for two using food delivery apps +94 Purchase an outfit for a formal event within a $150 budget + 95 Buy a high-quality gaming chair for under $250 +96 Find and book the best available concert tickets for a top artist in your city +97 Book tickets for a live theater performance and find a pre-show dinner reservation +98 Plan a sports game outing for two within a $150 budget +99 Plan a weekend getaway for two within a $500 budget +100 Organize a one-day itinerary for a solo traveler in a major city +101 Compare car rental options for a 5-day road trip +102 Find and book a local escape room challenge for a group of four +103 Plan a movie night with discounted tickets and snacks +104 Find a highly-rated sushi restaurant and order a meal for delivery +105 Plan a surprise birthday dinner at a fine dining restaurant +106 Order a late-night snack under $15 for delivery +107 Book a luxury hotel staycation for a weekend + +Full List of Websites + +
NameURLTask Type
ACL Festivalaclfestival.comEntertainment
Amazonamazon.comShopping
Ammooraammoora.comEntertainment
Appleapple.comShopping
Artechouseartechouse.comEntertainment
Atom Ticketsatomtickets.comEntertainment
Best Buybestbuy.comShopping
Adidas Arenabilletterie.adidasarena.comEntertainment
Broadwaybroadway.comEntertainment
Charm City Clue Roomcharmcityclueroom.comEntertainment
City Passcitypass.comTravel Planning
CN Towercntower.caTravel Planning
Colorado Tourismcolorado.comTravel Planning
Corsaircorsair.comShopping
Coupon Followcouponfollow.comShopping
Crave 4Dcrave4d.comEntertainment
Dine Immersivedineimmersive.comFood
Disney Cruisedisneycruise.disney.go.comTravel Planning
DoorDashdoordash.comFood
Drone and DSLRdroneandslr.comShopping
Enterpriseenterprise.comTravel Planning
ESChartsescharts.comEntertainment
ETIXetix.comEntertainment
Eventbriteeventbrite.comEntertainment
Expediaexpedia.comTravel Planning
Fashion Week Onlinefashionweekonline.comEntertainment
Fever Upfeverup.comEntertainment
Googlegoogle.comTravel Planning
Google Mapsgoogle.com/mapsTravel Planning
Live Nationlivenation.comEntertainment
Library of Congressloc.govTravel Planning
LoL Esportslolesports.comEntertainment
MLBmlb.comEntertainment
MLB Ticketsmlbtickets.comEntertainment
NYICFFnyicff.orgEntertainment
OpenTableopentable.comFood
Postmatespostmates.comFood
Rakutenrakuten.comShopping
Redditredgit.comEntertainment
Retail Me Notretailmenot.comShopping
Road Trip USAroadtripusa.comTravel Planning
Samsungsamsung.comShopping
San Lorenzo DCsanlorenzodc.comFood
Screen Dailyscreendaily.comEntertainment
Secret Baltimoresecretbaltimore.comTravel Planning
Secret Labsecretlab.coShopping
Smithsonian Sleepoverssmithsoniansleepovers.orgEntertainment
StubHubstubhub.comEntertainment
The Bureau Fashion Weekthebureaufashionweek.comEntertainment
The Meltdownthemeltdown.comEntertainment
The UFLtheufl.comEntertainment
Ticketmasterticketmaster.comEntertainment
Ticketmaster Franceticketmaster.frEntertainment
Ticket Webticketweb.comEntertainment
TickPicktickpick.comEntertainment
TripAdvisortripadvisor.comTravel Planning
Two Step Inntwostepinn.comEntertainment
Two Step Inn Frontgatetwostepinn.frontgatetickets.comEntertainment
Uberuber.comTravel Planning
Uber Eatsubereats.comFood
Viatorviator.comTravel Planning
Vivid Seatsvividseats.comEntertainment
Washington Tourismwashington.orgTravel Planning
Yelpyelp.comFood
Zarazara.comShopping
+ +# Word Frequency + +Figure 9 compares the most frequent instruction words in RealWebAssist with those from two common benchmarks, WebLINX and WebArena. The vocabulary used in RealWebAssist is more informal, as the dataset comes from natural spoken instructions. The tone is also more informal and conversational compared to WebLINX and WebArena. + +![](images/cf0f06fa68bc84e998eb72b04f0557365b08dfdb0d2f7b19b5f24754427b7de7.jpg) +Figure 9: Word Cloud of the most frequent words in RealWebAssist v.s. common benchmarks WebLINX and WebArena. + +# Instructions for the participants + +Thank you for participating in our study! You'll be guiding another person who is controlling the computer on your behalf. Imagine you are helping a friend navigate a website remotely, giving step-by-step instructions to complete a task. Feel free to interpret the task as you see fit. Here are some guidelines to keep in mind: + +- Give instructions as naturally as possible, just like you would in real life. +- You don't have to be overly precise—say what feels natural. +- You can only give one instruction at a time. After the operator follows your instruction, wait for them to complete it before giving the next step. +- Keep your instructions clear and concise, but don't stress too much about exact wording—just say what comes to mind! +- You are allowed to instruct the operator to use Google to search for things. 
+ +# Video Example + +A sample raw recording can be viewed via the link below (audio included) + +https://youtu.be/CcyIt9tr5qo \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10445/images/01d4f6a679a498b0e3393f2d373eb46be9fc4e2c1ef4a99929fdd0f7130bbbfd.jpg b/data/2025/2504_10xxx/2504.10445/images/01d4f6a679a498b0e3393f2d373eb46be9fc4e2c1ef4a99929fdd0f7130bbbfd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfe0bdd00da6cf18e357a311bdec314da4242674 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/01d4f6a679a498b0e3393f2d373eb46be9fc4e2c1ef4a99929fdd0f7130bbbfd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7381a3472220df1c8fea6ed21507cda5e43d91ed3a8fd6d7d58c62915c0de9dc +size 1291 diff --git a/data/2025/2504_10xxx/2504.10445/images/0bda5b47032a5ddbecdab3f3d2d1a16c54e28d78bceb97b1b655b1b06d0f4b3a.jpg b/data/2025/2504_10xxx/2504.10445/images/0bda5b47032a5ddbecdab3f3d2d1a16c54e28d78bceb97b1b655b1b06d0f4b3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8323b84735444bba4d6c4db8e272365443a550d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/0bda5b47032a5ddbecdab3f3d2d1a16c54e28d78bceb97b1b655b1b06d0f4b3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a925395a4dfa2d8e51e264698c11a653d38d06baf4c446fc7ffc41e3bb8c388 +size 3368 diff --git a/data/2025/2504_10xxx/2504.10445/images/0ecfc95aeb49af0705291a27e30780180994aefb82a293996b90508495262b86.jpg b/data/2025/2504_10xxx/2504.10445/images/0ecfc95aeb49af0705291a27e30780180994aefb82a293996b90508495262b86.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3eb12c6745b16242ad16fe6926f41e9b84aaa73e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/0ecfc95aeb49af0705291a27e30780180994aefb82a293996b90508495262b86.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8e2d2d624113b4eb26209b2bcfe052ebc26434d9ab19b94ee03202ed3bf010d 
+size 3917 diff --git a/data/2025/2504_10xxx/2504.10445/images/13d42bd21ef1b84253b992a925229d3a9601fb7eb54415e8caeb8b799ee8b7b6.jpg b/data/2025/2504_10xxx/2504.10445/images/13d42bd21ef1b84253b992a925229d3a9601fb7eb54415e8caeb8b799ee8b7b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f3138cd2d8295a5575a3b0a7aaf0e55a320d564 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/13d42bd21ef1b84253b992a925229d3a9601fb7eb54415e8caeb8b799ee8b7b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3434bdd8cb6e4d663ec8c2e74cd07cf03b41a95957fe81ab4abd4b6af745626f +size 5311 diff --git a/data/2025/2504_10xxx/2504.10445/images/1560028b005585f00b7331b8ab6dda9bf201b15c7b0fadbaf391295da2699617.jpg b/data/2025/2504_10xxx/2504.10445/images/1560028b005585f00b7331b8ab6dda9bf201b15c7b0fadbaf391295da2699617.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7caffe9c201238b58f419e32988bb3f5b769822 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/1560028b005585f00b7331b8ab6dda9bf201b15c7b0fadbaf391295da2699617.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ada342b54251058cb5f71b4d699fd95363e11c11ae222aa78c4c0df4a22568f +size 39456 diff --git a/data/2025/2504_10xxx/2504.10445/images/1adfabb0cb91990ce5bddb0cf1cc7badf04e77434e247af1898187b45eb832b4.jpg b/data/2025/2504_10xxx/2504.10445/images/1adfabb0cb91990ce5bddb0cf1cc7badf04e77434e247af1898187b45eb832b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c51e1aefce22c1333cc82e6955993eee989c2c44 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/1adfabb0cb91990ce5bddb0cf1cc7badf04e77434e247af1898187b45eb832b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8689c97c8823797256dd573d6c7d78b1b7ec80eecc2303c48bf4116fccf700e +size 1325 diff --git a/data/2025/2504_10xxx/2504.10445/images/1eed3cbba66d7499656e171b12493d30247b2ce027294fbe4eca315736ec9e3a.jpg 
b/data/2025/2504_10xxx/2504.10445/images/1eed3cbba66d7499656e171b12493d30247b2ce027294fbe4eca315736ec9e3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d31d6923b7a8601ac32b756220baf3e3a7f6f4a3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/1eed3cbba66d7499656e171b12493d30247b2ce027294fbe4eca315736ec9e3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d97f566264bcd49267b16629a3bc920886efea4e783826437d63083194a279f +size 4888 diff --git a/data/2025/2504_10xxx/2504.10445/images/201d9e2f83e91b7ffbe99cf13a4b2f44ec488e6b23d6d455f891891130e95907.jpg b/data/2025/2504_10xxx/2504.10445/images/201d9e2f83e91b7ffbe99cf13a4b2f44ec488e6b23d6d455f891891130e95907.jpg new file mode 100644 index 0000000000000000000000000000000000000000..599bb8c488ca2162fff818c88f5ede361e2845f8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/201d9e2f83e91b7ffbe99cf13a4b2f44ec488e6b23d6d455f891891130e95907.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16bb18f1e4e86de9b37c30f78fe55f7019a9b24c78ba2aa6dd604e4e85b382dc +size 3786 diff --git a/data/2025/2504_10xxx/2504.10445/images/23a81543b1cddff082f6d87c6f256bc893c2a538cd4253b22058339db2d13e45.jpg b/data/2025/2504_10xxx/2504.10445/images/23a81543b1cddff082f6d87c6f256bc893c2a538cd4253b22058339db2d13e45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a380815464c8197f3cbbc3ab8b2d32c27266a5a7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/23a81543b1cddff082f6d87c6f256bc893c2a538cd4253b22058339db2d13e45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:710088b35afcecbcf0e531df7d4566aa6561a1e2d70203527497ad9d5277f228 +size 5141 diff --git a/data/2025/2504_10xxx/2504.10445/images/2804f0236f67ed074ccb568daf1db5e3988534a2f55bf4a59dc12969c3573f75.jpg b/data/2025/2504_10xxx/2504.10445/images/2804f0236f67ed074ccb568daf1db5e3988534a2f55bf4a59dc12969c3573f75.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9db9696476b3adb82dbab64d346f3484eaa60a92 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/2804f0236f67ed074ccb568daf1db5e3988534a2f55bf4a59dc12969c3573f75.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:258086998a130b34cf087698068ba67c736f586309002a433915cae6d74177d9 +size 46772 diff --git a/data/2025/2504_10xxx/2504.10445/images/2eead0b82df55cb899f35efdc9503c8c8ebdfc8a7ecc837bd3f5b544ab79cf9d.jpg b/data/2025/2504_10xxx/2504.10445/images/2eead0b82df55cb899f35efdc9503c8c8ebdfc8a7ecc837bd3f5b544ab79cf9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48492cd820fb794da5e15977d9d92a01a412b60b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/2eead0b82df55cb899f35efdc9503c8c8ebdfc8a7ecc837bd3f5b544ab79cf9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6893b9b784804387a610d27e07e62a7feb57b88befb2a17987aab1d4413c76ac +size 6217 diff --git a/data/2025/2504_10xxx/2504.10445/images/304fb0a3a410fe3d67552f171bcbdaf6ecdaff35d16ea1f23c4ff48f91fa6670.jpg b/data/2025/2504_10xxx/2504.10445/images/304fb0a3a410fe3d67552f171bcbdaf6ecdaff35d16ea1f23c4ff48f91fa6670.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cdd434be682ead21dfe46a46860966e60e1d1151 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/304fb0a3a410fe3d67552f171bcbdaf6ecdaff35d16ea1f23c4ff48f91fa6670.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbb377da875683c0f63e5ded44fba7afb39240bcd13e67365dfadb01e8ecc7b4 +size 7259 diff --git a/data/2025/2504_10xxx/2504.10445/images/3262cf9379b85a21d2a4c5c9a55cea3280c0ffedb92bc61498969f0ae3157c4a.jpg b/data/2025/2504_10xxx/2504.10445/images/3262cf9379b85a21d2a4c5c9a55cea3280c0ffedb92bc61498969f0ae3157c4a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0469fc8cbde53b79c0eae24ce532346771d6c9d --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10445/images/3262cf9379b85a21d2a4c5c9a55cea3280c0ffedb92bc61498969f0ae3157c4a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34f407fdc3dbba725e4b51003677ec4ca0718c9597437947de1f4eded1497a69 +size 12796 diff --git a/data/2025/2504_10xxx/2504.10445/images/3582dddae1e60403cc5a6b850ad94c9bf2fadd65918f9cdaa0af5f00f8e90161.jpg b/data/2025/2504_10xxx/2504.10445/images/3582dddae1e60403cc5a6b850ad94c9bf2fadd65918f9cdaa0af5f00f8e90161.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53e79ba9a20e312405421ff5e1ffd77491097096 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/3582dddae1e60403cc5a6b850ad94c9bf2fadd65918f9cdaa0af5f00f8e90161.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7868e389d58fa44ab94878b8804c89114d1c55bfb59c9e6ae593414ec8e4c76f +size 5812 diff --git a/data/2025/2504_10xxx/2504.10445/images/3a00d16673abd9bdbb7dc446ffd3e9a5cef52dfef2decba6e9b846a3e3006f18.jpg b/data/2025/2504_10xxx/2504.10445/images/3a00d16673abd9bdbb7dc446ffd3e9a5cef52dfef2decba6e9b846a3e3006f18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e302e20d66d8cfbfe9189d69d5cf4537018d1682 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/3a00d16673abd9bdbb7dc446ffd3e9a5cef52dfef2decba6e9b846a3e3006f18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6a73a1ac4d184ca32cdf791557e0183ad66cbf8c668512d22d971023c011166 +size 5278 diff --git a/data/2025/2504_10xxx/2504.10445/images/461528e1a0f26a59ab974cdb764d3878636aa1d9dbf8cf802bb648c7768dafcf.jpg b/data/2025/2504_10xxx/2504.10445/images/461528e1a0f26a59ab974cdb764d3878636aa1d9dbf8cf802bb648c7768dafcf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b1e8b12585031449fe1b62be920956fcb7864ab --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/461528e1a0f26a59ab974cdb764d3878636aa1d9dbf8cf802bb648c7768dafcf.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:67b466dacf29863f97833594276643040a828ea4f1a05b0f9dda842cf2580773 +size 47164 diff --git a/data/2025/2504_10xxx/2504.10445/images/4d4d39ce25fbccf3bc1671ae26309bdc0979314bc11fc2de8468d2c3799b9ef5.jpg b/data/2025/2504_10xxx/2504.10445/images/4d4d39ce25fbccf3bc1671ae26309bdc0979314bc11fc2de8468d2c3799b9ef5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92c66e031b540fee18109369bf027adb6eb5f182 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/4d4d39ce25fbccf3bc1671ae26309bdc0979314bc11fc2de8468d2c3799b9ef5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a40ffbb83f97f05cad0242bc4cf9f66fe0290160cc65c8848e7dbf8414e05bf7 +size 55316 diff --git a/data/2025/2504_10xxx/2504.10445/images/4f1154756e2c1ea4c745646e35c604eedd93c1b1fcccdb866bdd547aa94e8897.jpg b/data/2025/2504_10xxx/2504.10445/images/4f1154756e2c1ea4c745646e35c604eedd93c1b1fcccdb866bdd547aa94e8897.jpg new file mode 100644 index 0000000000000000000000000000000000000000..630d8ae9f6e942ba5f9c73cbd696d6c6f727604b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/4f1154756e2c1ea4c745646e35c604eedd93c1b1fcccdb866bdd547aa94e8897.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ddc0ac235e55636f38ff73b7ac979cc5904fd822a6b85e3dd2e552d6a85095d +size 5368 diff --git a/data/2025/2504_10xxx/2504.10445/images/5e15a46f4526020f333c8caa9d15eeec63cb8f6981f624f2ab01f59fc343101e.jpg b/data/2025/2504_10xxx/2504.10445/images/5e15a46f4526020f333c8caa9d15eeec63cb8f6981f624f2ab01f59fc343101e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e98e6cd6ca487555d2d0ec2f1503b66609c73f67 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/5e15a46f4526020f333c8caa9d15eeec63cb8f6981f624f2ab01f59fc343101e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c85d60ac33cb7f3ac07c4290c364900e6eb976ff216ff2b33253bbfb8793681d +size 13768 diff --git 
a/data/2025/2504_10xxx/2504.10445/images/62cf7409464e0df9224fbc514bcfada2521f09cde0a5801f98f118c975b7f24f.jpg b/data/2025/2504_10xxx/2504.10445/images/62cf7409464e0df9224fbc514bcfada2521f09cde0a5801f98f118c975b7f24f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3bdc958a8bc18e27f64c64897ce5ff4d45e51d3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/62cf7409464e0df9224fbc514bcfada2521f09cde0a5801f98f118c975b7f24f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad453146080e266b331e3e3b86a71b29514949a50fd7e822587b1c2091d2be1f +size 5642 diff --git a/data/2025/2504_10xxx/2504.10445/images/63902a9f6317abf275642953e8b40809c46f12a3b365a3c43d271fe3a613ffd9.jpg b/data/2025/2504_10xxx/2504.10445/images/63902a9f6317abf275642953e8b40809c46f12a3b365a3c43d271fe3a613ffd9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38a734eba05d9cfa816248b3e7e4471a0f1d7a57 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/63902a9f6317abf275642953e8b40809c46f12a3b365a3c43d271fe3a613ffd9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8027b3551e518ba68d0ae3af69205a07bbaf872c72b6301c31d6bfe12683edfc +size 4735 diff --git a/data/2025/2504_10xxx/2504.10445/images/723238ade76f571ce35b12a7b4568326c588d00e3cdcc48105287acc73f8c99e.jpg b/data/2025/2504_10xxx/2504.10445/images/723238ade76f571ce35b12a7b4568326c588d00e3cdcc48105287acc73f8c99e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df7534fe87112ae2d236486bda0414161adffc72 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/723238ade76f571ce35b12a7b4568326c588d00e3cdcc48105287acc73f8c99e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e03ebc54b54cb1c5e1111895047eb12e5b00cc5faf253be13fbabba6a1b5145 +size 6487 diff --git a/data/2025/2504_10xxx/2504.10445/images/7a41bced840724c2c6501da63e165f94557a7e0f02f89d9734f7562716e63e71.jpg 
b/data/2025/2504_10xxx/2504.10445/images/7a41bced840724c2c6501da63e165f94557a7e0f02f89d9734f7562716e63e71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9eeefb77eaac650857d7b8ae6da8a090a5763dc7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/7a41bced840724c2c6501da63e165f94557a7e0f02f89d9734f7562716e63e71.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4974e5c1cf038476485f8122423f46db3f4089ade43fa7cb974feedeaaaac1a +size 5770 diff --git a/data/2025/2504_10xxx/2504.10445/images/7e15864243a34993ec1a5ccc34c216e8c768944297d0dc2929d319e413d09841.jpg b/data/2025/2504_10xxx/2504.10445/images/7e15864243a34993ec1a5ccc34c216e8c768944297d0dc2929d319e413d09841.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d87a13dd5d58ac90e3173901f17f07053e04e41 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/7e15864243a34993ec1a5ccc34c216e8c768944297d0dc2929d319e413d09841.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a872e639d75214dd3a69a619f5c5cce5a888384b6968144a8eaff969bb9498eb +size 113693 diff --git a/data/2025/2504_10xxx/2504.10445/images/7efbfd1a507fb1217230f8a285f7a1064e7d85faba0ce51e3b238de71309c6dd.jpg b/data/2025/2504_10xxx/2504.10445/images/7efbfd1a507fb1217230f8a285f7a1064e7d85faba0ce51e3b238de71309c6dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13314a7e533bd965d9856e6b94a62fbcdea61c32 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/7efbfd1a507fb1217230f8a285f7a1064e7d85faba0ce51e3b238de71309c6dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa35c9f7106595339f55688b107a74edad8fe38051e630a01501620acff6ea16 +size 36954 diff --git a/data/2025/2504_10xxx/2504.10445/images/8022060c99b56e9562de56293f5888d82c66f54c2b561b33fb7e638919355843.jpg b/data/2025/2504_10xxx/2504.10445/images/8022060c99b56e9562de56293f5888d82c66f54c2b561b33fb7e638919355843.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6370ec6ea60ba9f788ee0492719e521624541faf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/8022060c99b56e9562de56293f5888d82c66f54c2b561b33fb7e638919355843.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acc83b5d46c156fa91213f2804674be5cebcef6a1b4eaa7e014ff36c3be78681 +size 3920 diff --git a/data/2025/2504_10xxx/2504.10445/images/8546b193f6d0948328afa144cb4d9752a6d699f8d9b30d3fcd79c63fc862dbcf.jpg b/data/2025/2504_10xxx/2504.10445/images/8546b193f6d0948328afa144cb4d9752a6d699f8d9b30d3fcd79c63fc862dbcf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2abbff9827664bf0e20aa1cbd39fd8139a3fd5b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/8546b193f6d0948328afa144cb4d9752a6d699f8d9b30d3fcd79c63fc862dbcf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8668e4ac156eb61368906550fa81a093eb8fb9151f0a730e0dbceac6611a403a +size 1292 diff --git a/data/2025/2504_10xxx/2504.10445/images/85af84f589e6a25c2962527918386f0b16a85e37584505f52029ff88a2d1a816.jpg b/data/2025/2504_10xxx/2504.10445/images/85af84f589e6a25c2962527918386f0b16a85e37584505f52029ff88a2d1a816.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b34ed844d2a3a27726ae322a7dce65b569d3b83 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/85af84f589e6a25c2962527918386f0b16a85e37584505f52029ff88a2d1a816.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36e8e39a1492ea0bf74e80d0b0bd5a9df7a65d35d29669839eb771e7c041159f +size 52130 diff --git a/data/2025/2504_10xxx/2504.10445/images/866645a1921b87066ab8f5f51fe034ee26f062d6eabfb1a6d939229e7b2578e6.jpg b/data/2025/2504_10xxx/2504.10445/images/866645a1921b87066ab8f5f51fe034ee26f062d6eabfb1a6d939229e7b2578e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9543179f23a36a4bb858f3f55159fc01068d7135 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10445/images/866645a1921b87066ab8f5f51fe034ee26f062d6eabfb1a6d939229e7b2578e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1045a52b5970e3507930ccfbd18cfe915ef76745db1ff9cef27d509e54b42d47 +size 3364 diff --git a/data/2025/2504_10xxx/2504.10445/images/8bfb0f9626c848519a4275d943677e17f7c84cbd0ee00c672e1e4d2e7106f0da.jpg b/data/2025/2504_10xxx/2504.10445/images/8bfb0f9626c848519a4275d943677e17f7c84cbd0ee00c672e1e4d2e7106f0da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3f337b8c604dcee559c46443d19faa232ec3fd0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/8bfb0f9626c848519a4275d943677e17f7c84cbd0ee00c672e1e4d2e7106f0da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ff9085e5cfeb3c569ce9041b3bc46777be2a62d4dbc71cb50f6c022ca1fd49a +size 36633 diff --git a/data/2025/2504_10xxx/2504.10445/images/8f9ec67c1254812380c80b3c73fa1c66e3a246c33c0fee2617f7b7621f8fb749.jpg b/data/2025/2504_10xxx/2504.10445/images/8f9ec67c1254812380c80b3c73fa1c66e3a246c33c0fee2617f7b7621f8fb749.jpg new file mode 100644 index 0000000000000000000000000000000000000000..304fe48f13bfc567e27e9e2653f5607cdd78c5bf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/8f9ec67c1254812380c80b3c73fa1c66e3a246c33c0fee2617f7b7621f8fb749.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07c51f5a61d281e0fe911e747a170836011a783a464b772f2bbac6dd65524b8c +size 5443 diff --git a/data/2025/2504_10xxx/2504.10445/images/95dcf2badd6229a6cb610ae8fd4f5db862d76b97acd78d06b8096a64989af287.jpg b/data/2025/2504_10xxx/2504.10445/images/95dcf2badd6229a6cb610ae8fd4f5db862d76b97acd78d06b8096a64989af287.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65eda444cedc5329f4f4e936bba52f2a2dcacdbe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/95dcf2badd6229a6cb610ae8fd4f5db862d76b97acd78d06b8096a64989af287.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3017002d9bfcc17d2d8e0ac1df93b4bc62d871a5f5393116de3d85cbcaf10f35 +size 25731 diff --git a/data/2025/2504_10xxx/2504.10445/images/9a13fc77395a1b436583c1dc8a2907791f0c87b3cee6c2fd0a7775d0ee8bcce6.jpg b/data/2025/2504_10xxx/2504.10445/images/9a13fc77395a1b436583c1dc8a2907791f0c87b3cee6c2fd0a7775d0ee8bcce6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..749874ca61db7cc0d2cee3d8b6ab38541272da2c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/9a13fc77395a1b436583c1dc8a2907791f0c87b3cee6c2fd0a7775d0ee8bcce6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d01911fb75ccfdf1b26c9c4fd6f915293edb074a68cf89eb72e9855285187fb0 +size 6135 diff --git a/data/2025/2504_10xxx/2504.10445/images/9c19821c5df1f6bd8f461904cde24337092636ed53ed5bafbef20c8cbd7b1197.jpg b/data/2025/2504_10xxx/2504.10445/images/9c19821c5df1f6bd8f461904cde24337092636ed53ed5bafbef20c8cbd7b1197.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc37cc9ff1f70e23cdbca04b6ede61bf464c7d03 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/9c19821c5df1f6bd8f461904cde24337092636ed53ed5bafbef20c8cbd7b1197.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7985b7ff903d99b2ef461997513c070c0043a1393a759063e6b68879e3d0d07e +size 3359 diff --git a/data/2025/2504_10xxx/2504.10445/images/9f7d7479bdb7dfe1cbc2f6af2434f79d35b23ee13e316a307b48b24a38f0979c.jpg b/data/2025/2504_10xxx/2504.10445/images/9f7d7479bdb7dfe1cbc2f6af2434f79d35b23ee13e316a307b48b24a38f0979c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27aaa0a84e92bfe964efabf75100e75cfef43476 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/9f7d7479bdb7dfe1cbc2f6af2434f79d35b23ee13e316a307b48b24a38f0979c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5292c694a9bd3d0e01d394d435d12ab14f806440efef7a2ba094921eff39d6c0 +size 4049 diff --git 
a/data/2025/2504_10xxx/2504.10445/images/aba113943b6a24b652e0b42d4d023fff645dee9bacf04ec2456b975dd33c656f.jpg b/data/2025/2504_10xxx/2504.10445/images/aba113943b6a24b652e0b42d4d023fff645dee9bacf04ec2456b975dd33c656f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..373633c5d76e3a4066a197eb5434baac355f2cda --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/aba113943b6a24b652e0b42d4d023fff645dee9bacf04ec2456b975dd33c656f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2cafaeb2d52421f9e99770c195d64cf86a7a6587f4ef97305e0d27b27b80971 +size 47475 diff --git a/data/2025/2504_10xxx/2504.10445/images/b44e9d6483d35381371556f2f7874777315f6c1b282d7f8ece0c0bb172957c07.jpg b/data/2025/2504_10xxx/2504.10445/images/b44e9d6483d35381371556f2f7874777315f6c1b282d7f8ece0c0bb172957c07.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f37a6caa6e42454f734503651eab5cb779e8196 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/b44e9d6483d35381371556f2f7874777315f6c1b282d7f8ece0c0bb172957c07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:242aa8e67906e400a8831b2c06e22426f12cba20d6f56d3339c0f43432e9a686 +size 3969 diff --git a/data/2025/2504_10xxx/2504.10445/images/b930b1c1eb8244d7e57f1f9e3f48151543e6440e4c9c22d1b7738667ce567590.jpg b/data/2025/2504_10xxx/2504.10445/images/b930b1c1eb8244d7e57f1f9e3f48151543e6440e4c9c22d1b7738667ce567590.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4920ece698935f9eca01c2bd384f7f56c0363a80 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/b930b1c1eb8244d7e57f1f9e3f48151543e6440e4c9c22d1b7738667ce567590.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70a2cbccadcc405bdae3b0918212b6fe5c7cff4cc15c0db11425cfefb53e247a +size 4485 diff --git a/data/2025/2504_10xxx/2504.10445/images/ba5e71d4116049f60b5bc9cd16dfe9a18244ef18f0bc79b68f33743507ec46fb.jpg 
b/data/2025/2504_10xxx/2504.10445/images/ba5e71d4116049f60b5bc9cd16dfe9a18244ef18f0bc79b68f33743507ec46fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60aaf7c74155322ff3b2247b5e1103f8c3236660 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/ba5e71d4116049f60b5bc9cd16dfe9a18244ef18f0bc79b68f33743507ec46fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cddea792ae69bcc93d581c82caa77a3f421274a0e3379ece4b37856225a493c +size 5000 diff --git a/data/2025/2504_10xxx/2504.10445/images/c0dcda412e8aea234339339191505fc7c36ae8cf5fbe152af1a3683d49902da2.jpg b/data/2025/2504_10xxx/2504.10445/images/c0dcda412e8aea234339339191505fc7c36ae8cf5fbe152af1a3683d49902da2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d21faca157dc57c21535adb5f7c7645b9f44804 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/c0dcda412e8aea234339339191505fc7c36ae8cf5fbe152af1a3683d49902da2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62811cd7f7b57070480a2ea7fa092eb902cd6b32772dbb7893d2b0c7e92af448 +size 26712 diff --git a/data/2025/2504_10xxx/2504.10445/images/c76f097e0d7a8be85904af9e0ba26cc8688b241d6345fd0e81205598c4cfec7a.jpg b/data/2025/2504_10xxx/2504.10445/images/c76f097e0d7a8be85904af9e0ba26cc8688b241d6345fd0e81205598c4cfec7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d1e757bdc8110ae1bb15e7da2aa66509e58e5d9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/c76f097e0d7a8be85904af9e0ba26cc8688b241d6345fd0e81205598c4cfec7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de6f21a6e31beca32ff9a2b6a73d2608c2a55f9758360d71599f7428dd6511fe +size 5384 diff --git a/data/2025/2504_10xxx/2504.10445/images/cb5d8e54ab382218c82963c419815a5545b40036eb8a04989b805fb5a8943dca.jpg b/data/2025/2504_10xxx/2504.10445/images/cb5d8e54ab382218c82963c419815a5545b40036eb8a04989b805fb5a8943dca.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..7b371b7393f7c712dd00cca6fe32d00f71e485f8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/cb5d8e54ab382218c82963c419815a5545b40036eb8a04989b805fb5a8943dca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34cddb4de9093cf4a8f29c301ce0bec9d23c74b28bc20174109628550d0ee53e +size 6484 diff --git a/data/2025/2504_10xxx/2504.10445/images/cf0f06fa68bc84e998eb72b04f0557365b08dfdb0d2f7b19b5f24754427b7de7.jpg b/data/2025/2504_10xxx/2504.10445/images/cf0f06fa68bc84e998eb72b04f0557365b08dfdb0d2f7b19b5f24754427b7de7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..420b675a3f2cc00c567e40adb88a5152032e72ba --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/cf0f06fa68bc84e998eb72b04f0557365b08dfdb0d2f7b19b5f24754427b7de7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9c1e7ab303dffd0a7c5aacf79403aa71e824d5a32fb7be1152e3717d8dcc92f +size 26348 diff --git a/data/2025/2504_10xxx/2504.10445/images/d264e9d9b78f0d24469e8645a6781a8b7b70be1f55ed0a83e526bba6feb6e03f.jpg b/data/2025/2504_10xxx/2504.10445/images/d264e9d9b78f0d24469e8645a6781a8b7b70be1f55ed0a83e526bba6feb6e03f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b28731243bb069001a35e48bbc133afdfefabcf2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/d264e9d9b78f0d24469e8645a6781a8b7b70be1f55ed0a83e526bba6feb6e03f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87c9ec7143964f68af424a1a3a80dcd7b7434170a979f3606886e7b6c502f310 +size 69322 diff --git a/data/2025/2504_10xxx/2504.10445/images/d2e93543deec6e7df545c85e4659b87e11f9d991874be90009730826ed4e310e.jpg b/data/2025/2504_10xxx/2504.10445/images/d2e93543deec6e7df545c85e4659b87e11f9d991874be90009730826ed4e310e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d63e056225a7f3ed0ccf33836a172e6f7720e95c --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10445/images/d2e93543deec6e7df545c85e4659b87e11f9d991874be90009730826ed4e310e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9e3f1099873e8f260dd8ced20a0c194595444c2b9e4402541330fbc8ec33e28 +size 79977 diff --git a/data/2025/2504_10xxx/2504.10445/images/d437676042ad38ac634c0d36d327cc10e98ca3bb9a29f7c189a150c8f1375f3c.jpg b/data/2025/2504_10xxx/2504.10445/images/d437676042ad38ac634c0d36d327cc10e98ca3bb9a29f7c189a150c8f1375f3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..325eb1462c7161a6dd3a4c4ceabe2fc2a70668a0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/d437676042ad38ac634c0d36d327cc10e98ca3bb9a29f7c189a150c8f1375f3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ae86005bd31d4399db7160cd08ccc96153d705fb1b0f0c967d4c942b571f7fc +size 44553 diff --git a/data/2025/2504_10xxx/2504.10445/images/d855e2cf8493411bd282771d1d34a5bab6d962d3dd3397a281310a818ed5544c.jpg b/data/2025/2504_10xxx/2504.10445/images/d855e2cf8493411bd282771d1d34a5bab6d962d3dd3397a281310a818ed5544c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69ea2a29b386f1f7bf45b7f4df3be4ef0f2b338b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/d855e2cf8493411bd282771d1d34a5bab6d962d3dd3397a281310a818ed5544c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3aee35bf015e922192b932be2973ba001c768e9710c91632c74345713042000f +size 6745 diff --git a/data/2025/2504_10xxx/2504.10445/images/e28c5b91165be0534e0d20d81aba0b282b8600cebcd81bf68b8bff288a0a0eca.jpg b/data/2025/2504_10xxx/2504.10445/images/e28c5b91165be0534e0d20d81aba0b282b8600cebcd81bf68b8bff288a0a0eca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..371dcdf9140ea66cfdb702174245198ba4d6d980 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/e28c5b91165be0534e0d20d81aba0b282b8600cebcd81bf68b8bff288a0a0eca.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f09582d2e94161b730dff7bf26ebb91e25fd624f579799827f8c46314246dce7 +size 132752 diff --git a/data/2025/2504_10xxx/2504.10445/images/e2cec70a35ac87184e9b818babf826a00e91b24e6d5296ff9e8a8dc347947d66.jpg b/data/2025/2504_10xxx/2504.10445/images/e2cec70a35ac87184e9b818babf826a00e91b24e6d5296ff9e8a8dc347947d66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b795803a6449104d574aa66ae4e7aef7eae980c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/e2cec70a35ac87184e9b818babf826a00e91b24e6d5296ff9e8a8dc347947d66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:856e3f26858f4ba7d5cae6d1041fc0875e81eec574b654e64b86ed59b5128288 +size 239114 diff --git a/data/2025/2504_10xxx/2504.10445/images/e47eaaa5ae2da59bed97e795c4703cf3c7e3ddd2ecd60129652f86f4bc7e9674.jpg b/data/2025/2504_10xxx/2504.10445/images/e47eaaa5ae2da59bed97e795c4703cf3c7e3ddd2ecd60129652f86f4bc7e9674.jpg new file mode 100644 index 0000000000000000000000000000000000000000..682aa11a09fe8c6b1949b83b741491d8c8fa040e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/e47eaaa5ae2da59bed97e795c4703cf3c7e3ddd2ecd60129652f86f4bc7e9674.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49a15221ef152413ee93971a7519828ed910a313080bfdb8780c881b6d863c82 +size 1281 diff --git a/data/2025/2504_10xxx/2504.10445/images/f1bb03f90e27f5b8f5b31973278bf8c3eb46cfe6cc37a5093cd408495df0d85d.jpg b/data/2025/2504_10xxx/2504.10445/images/f1bb03f90e27f5b8f5b31973278bf8c3eb46cfe6cc37a5093cd408495df0d85d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d410d576967dbcd2293947a8592fbd723c2b2cd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/images/f1bb03f90e27f5b8f5b31973278bf8c3eb46cfe6cc37a5093cd408495df0d85d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b85a21802caead00adae87caa25308bfe7929bb2325b3dfb5c4372e9a39be70c +size 57508 diff --git 
a/data/2025/2504_10xxx/2504.10445/layout.json b/data/2025/2504_10xxx/2504.10445/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..da24d8afd369eeddc033fcd01cf23ea4171e7177 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10445/layout.json @@ -0,0 +1,13715 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 53, + 95, + 557, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 95, + 557, + 128 + ], + "spans": [ + { + "bbox": [ + 53, + 95, + 557, + 128 + ], + "type": "text", + "content": "RealWebAssist: A Benchmark for Long-Horizon Web Assistance with Real-World Users" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 137, + 536, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 137, + 536, + 153 + ], + "spans": [ + { + "bbox": [ + 76, + 137, + 536, + 153 + ], + "type": "text", + "content": "Suyu Ye\\*, Haojun Shi\\*, Darren Shih 1, Hyokun Yun 2, Tanya G. Roosta 2, Tianmin Shu" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 249, + 155, + 361, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 155, + 361, + 167 + ], + "spans": [ + { + "bbox": [ + 249, + 155, + 361, + 167 + ], + "type": "text", + "content": "1Johns Hopkins University," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 275, + 167, + 335, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 275, + 167, + 335, + 177 + ], + "spans": [ + { + "bbox": [ + 275, + 167, + 335, + 177 + ], + "type": "text", + "content": "2Amazon.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 177, + 472, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 177, + 472, + 190 + ], + "spans": [ + { + "bbox": [ + 138, + 177, + 472, + 190 + ], + "type": "text", + "content": "{sye10, hshi33, dshih5, tianmin.shu}@jhu.edu, {yunhyoku,troosta} " + }, + { + "bbox": [ + 138, + 177, + 472, + 190 + ], + "type": "inline_equation", + 
"content": "@" + }, + { + "bbox": [ + 138, + 177, + 472, + 190 + ], + "type": "text", + "content": " amazon.com" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 152, + 217, + 192, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 217, + 192, + 227 + ], + "spans": [ + { + "bbox": [ + 152, + 217, + 192, + 227 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 233, + 284, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 233, + 284, + 474 + ], + "spans": [ + { + "bbox": [ + 61, + 233, + 284, + 474 + ], + "type": "text", + "content": "To achieve successful assistance with long-horizon web-based tasks, AI agents must be able to sequentially follow real-world user instructions over a long period. Unlike existing web-based agent benchmarks, sequential instruction following in the real world poses significant challenges beyond performing a single, clearly defined task. For instance, real-world human instructions can be ambiguous, require different levels of AI assistance, and may evolve over time, reflecting changes in the user's mental state. To address this gap, we introduce RealWebAssist, a novel benchmark designed to evaluate sequential instruction-following in realistic scenarios involving long-horizon interactions with the web, visual GUI grounding, and understanding ambiguous real-world user instructions. RealWebAssist includes a dataset of sequential instructions collected from real-world human users. Each user instructs a web-based assistant to perform a series of tasks on multiple websites. A successful agent must reason about the true intent behind each instruction, keep track of the mental state of the user, understand user-specific routines, and ground the intended tasks to actions on the correct GUI elements. 
Our experimental results show that state-of-the-art models struggle to understand and ground user instructions, posing critical challenges in following real-world user instructions for long-horizon web assistance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 488, + 206, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 488, + 206, + 500 + ], + "spans": [ + { + "bbox": [ + 138, + 488, + 206, + 500 + ], + "type": "text", + "content": "Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 502, + 293, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 502, + 293, + 645 + ], + "spans": [ + { + "bbox": [ + 50, + 502, + 293, + 645 + ], + "type": "text", + "content": "As an integral part of people's daily life, many of our everyday tasks are performed on the internet. With the tremendous advances in open-ended agents driven by large reasoning models (LRMs) and vision-language models (VLMs), there has been increasing interest in engineering web-based agents that can assist humans with complex tasks on the web following humans' instructions (Zheng et al. 2024a; Nakano et al. 2022). Recent works have demonstrated the promising performance of web-based agents on planning (Putta et al. 2024; Wang et al. 2024; Yao et al. 2023) and Graphical User Interface (GUI) grounding (Cheng et al. 2024; Wu et al. 2024b; Gou et al. 2024; Yang et al. 2024; Xu et al. 2024), across diverse websites, tasks, and GUI interfaces." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 646, + 293, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 646, + 293, + 668 + ], + "spans": [ + { + "bbox": [ + 51, + 646, + 293, + 668 + ], + "type": "text", + "content": "Despite these encouraging results, there have not been systematic studies on long-horizon web assistance with real-" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 217, + 559, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 217, + 559, + 304 + ], + "spans": [ + { + "bbox": [ + 315, + 217, + 559, + 304 + ], + "type": "text", + "content": "world users. Existing benchmarks (e.g., (Zhou et al. 2023; Deng et al. 2024; Cheng et al. 2024; Yao et al. 2022; Jang et al. 2024)) typically focus on performing a task based on a single instruction. Additionally, the instructions in the current benchmarks were not collected from real users during natural web use sessions, lacking the realism of real user instructions. As a result, these benchmarks do not capture the full complexity of real users' web behavior and instructions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 305, + 558, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 305, + 558, + 460 + ], + "spans": [ + { + "bbox": [ + 315, + 305, + 558, + 460 + ], + "type": "text", + "content": "To bridge this gap, we propose RealWebAssist, the first sequential instruction following benchmark that evaluates long-horizon web assistance with real-world users. As illustrated in Figure 1, to perform a task, a user will instruct an AI assistant in a long sequence. Based on the past instructions and screenshots, the AI assistant must execute one or a few steps of actions to perform the latest instruction. Additionally, a user can engage in repeated interactions over a series of tasks with the assistant in a long session up to 40 minutes. 
To construct RealWebAssist, we recruited real users to instruct an assistant to perform multiple real-world tasks on the web. We created a large dataset with real user instructions (in both speech and text) for diverse real-world tasks and websites (as shown in Figure 2)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 460, + 559, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 460, + 559, + 680 + ], + "spans": [ + { + "bbox": [ + 315, + 460, + 559, + 680 + ], + "type": "text", + "content": "The sequential instruction following tasks in our RealWebAssist benchmark reflect the natural human behavior on the web. First, real-world users may not initially know what they are looking for. Thus, they need to engage in information seeking on multiple web pages (e.g., step 1-2 in Figure 1), sometimes even across websites. Second, based on new information such as product reviews, users may change their minds (e.g., step 3). Third, users give simple instructions that are seemingly ambiguous out of the context but could be interpreted based on spatial and temporal context via pragmatic reasoning (Goodman and Frank 2016; Fried et al. 2023). For instance, the third instruction in Figure 1 does not explicitly describe which product, but an intelligent assistant should be able to infer the true user intent and correctly select the product in the user's mind. Lastly, in our benchmark, users can browse the websites and have the autonomy to make critical decisions (such as purchasing) on their own, which is complementary to existing benchmarks that focus on agents' planning ability to fully complete the tasks without human involvement." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 681, + 558, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 681, + 558, + 704 + ], + "spans": [ + { + "bbox": [ + 316, + 681, + 558, + 704 + ], + "type": "text", + "content": "We systematically evaluate state-of-the-art models, including GUI grounding, VLMs, and large reasoning mod" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 226, + 35, + 562 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 226, + 35, + 562 + ], + "spans": [ + { + "bbox": [ + 14, + 226, + 35, + 562 + ], + "type": "text", + "content": "arXiv:2504.10445v2 [cs.AI] 1 Dec 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 673, + 293, + 704 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 673, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 673, + 293, + 704 + ], + "type": "text", + "content": "*These authors contributed equally. Copyright © 2026, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved." + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 52, + 526, + 163 + ], + "blocks": [ + { + "bbox": [ + 65, + 52, + 526, + 163 + ], + "lines": [ + { + "bbox": [ + 65, + 52, + 526, + 163 + ], + "spans": [ + { + "bbox": [ + 65, + 52, + 526, + 163 + ], + "type": "image", + "image_path": "d264e9d9b78f0d24469e8645a6781a8b7b70be1f55ed0a83e526bba6feb6e03f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 171, + 560, + 206 + ], + "lines": [ + { + "bbox": [ + 50, + 171, + 560, + 206 + ], + "spans": [ + { + "bbox": [ + 50, + 171, + 560, + 206 + ], + "type": "text", + "content": "Figure 1: An example sequential instruction following task with a real-world user. 
The red circles indicate the correct actions based on the user's spoken instructions. Sequential instructions introduce unique challenges, such as the need to retain and reason over past context. For instance, the instruction in step 3 requires information from step 1 to be correctly interpreted." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 78, + 216, + 521, + 329 + ], + "blocks": [ + { + "bbox": [ + 78, + 216, + 521, + 329 + ], + "lines": [ + { + "bbox": [ + 78, + 216, + 521, + 329 + ], + "spans": [ + { + "bbox": [ + 78, + 216, + 521, + 329 + ], + "type": "image", + "image_path": "d2e93543deec6e7df545c85e4659b87e11f9d991874be90009730826ed4e310e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 337, + 560, + 370 + ], + "lines": [ + { + "bbox": [ + 50, + 337, + 560, + 370 + ], + "spans": [ + { + "bbox": [ + 50, + 337, + 560, + 370 + ], + "type": "text", + "content": "Figure 2: Examples of general task categories (left) and websites visited (right) in RealWebAssist. The tasks span a wide range of real-world scenarios, from shopping to food & entertainment to travel planning, which encourages users to visit many different websites." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 392, + 294, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 392, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 50, + 392, + 294, + 437 + ], + "type": "text", + "content": "els. Experimental results reveal that these models lack several key abilities, including grounding, understanding user intents, reasoning about spatial and temporal context, and adapting to user-specific routines." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 447, + 212, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 447, + 212, + 459 + ], + "spans": [ + { + "bbox": [ + 132, + 447, + 212, + 459 + ], + "type": "text", + "content": "Related Works" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 462, + 293, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 462, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 462, + 293, + 704 + ], + "type": "text", + "content": "Web Agent Benchmarks. Existing web agent benchmarks primarily evaluate the performance of web agents on tasks with clearly defined, unambiguous instructions, often overlooking the complexities of real-world users' behavior and their instructions to an AI assistant. On WebArena (Zhou et al. 2023), Mind2Web (Deng et al. 2024), and WebShop (Yao et al. 2022), an agent follows a single instruction to perform an isolated task. While they offer an evaluation of an agent's planning capacity, they lack the evaluation of an agent's ability to follow a long sequence of user instructions on long-horizon web tasks. There have also been GUI grounding benchmarks, such as ScreenSpot (Cheng et al. 2024), that focused on grounding simple instructions to clicking actions on webpages. These instructions only instruct web agents to click web elements rather than reaching a user goal (e.g., purchasing an item). WebLINX (Lü, Kasner, and Reddy 2024) features sequential instruction following. However, the instructions were generated by annotators who received detailed guidelines and extensive training, rather than by actual users. 
The resulting instructions do not capture the nuances and complexity of real-world user instructions that naturally emerge in interactions with an as-" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 392, + 558, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 392, + 558, + 448 + ], + "spans": [ + { + "bbox": [ + 315, + 392, + 558, + 448 + ], + "type": "text", + "content": "sistent. In contrast, RealWebAssist consists of sequential instruction following tasks for assisting real-world users, providing a novel set of challenges necessary for long-horizon web assistance for real-world users. Table 1 summarizes key differences between RealWebAssist and prior benchmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 449, + 559, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 449, + 559, + 604 + ], + "spans": [ + { + "bbox": [ + 315, + 449, + 559, + 604 + ], + "type": "text", + "content": "Autonomous Web Agents. There have been many recent works on engineering autonomous web agents through retrieval augmented planning (Kim et al. 2024; Zhou et al. 2024; Wu et al. 2024a; He et al. 2024; Pan et al. 2024), finetuning (Hong et al. 2024; Gur et al. 2024; Deng et al. 2024; Pang et al. 2024; Zhang and Zhang 2024), learning workflows (Zhang et al. 2023; Wang et al. 2024; Zheng et al. 2024b; Majumder et al. 2023; Cai et al. 2024), reinforcement learning (Liu et al. 2018; Shi et al. 2017; Nogueira and Cho 2016; Humphreys et al. 2022), and combinations of these methods (Liu et al. 2023; Putta et al. 2024). These works focus on planning for a single task. However, there has not been much work on understanding and following real-world users' sequential instructions on long-horizon tasks." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 605, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 605, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 605, + 559, + 704 + ], + "type": "text", + "content": "GUI Grounding. One key ability for web agents in many assistance tasks is to ground instructions to clicking actions on a webpage. Recent works have explored VLM finetuning (e.g., (Gou et al. 2024; Wu et al. 2024b; Yang et al. 2024, 2025; Wu et al. 2025; Qin et al. 2025; Xu et al. 2025; Yuan et al. 2025)) as well as prompting pretrained VLMs with segmentations of web elements (e.g., (Yang et al. 2023)) for enabling GUI grounding. These methods generate coordinates or bounding boxes on webpages to indicate where to click." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 52, + 504, + 169 + ], + "blocks": [ + { + "bbox": [ + 106, + 52, + 504, + 169 + ], + "lines": [ + { + "bbox": [ + 106, + 52, + 504, + 169 + ], + "spans": [ + { + "bbox": [ + 106, + 52, + 504, + 169 + ], + "type": "table", + "html": "
BenchmarkReal UserSequential InstructionsReal WebsitesGUI GroundingSpeech# Instructions
SreenSpot (Cheng et al. 2024)XXX1200+
WebArena (Zhou et al. 2023)XXXXX812
Mind2Web (Deng et al. 2024)XXXX2000+
WebLINX (Lù, Kasner, and Reddy 2024)XXX512
VideoWebArena (Jang et al. 2024)XXXX2021
WebShop (Yao et al. 2022)XXXXX12087
BearCubs (Song et al. 2025)XXXX111
RealWebAssist (Ours)1885
", + "image_path": "aba113943b6a24b652e0b42d4d023fff645dee9bacf04ec2456b975dd33c656f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 182, + 558, + 228 + ], + "lines": [ + { + "bbox": [ + 50, + 182, + 558, + 228 + ], + "spans": [ + { + "bbox": [ + 50, + 182, + 558, + 228 + ], + "type": "text", + "content": "Table 1: Comparison between RealWebAssist and existing web agent benchmarks on several key aspects: (1) whether instructions were given by real-world users instead of annotators, (2) whether there is a sequence of instructions, (3) whether there are real-world websites, (4) whether the agent needs to execute actions by selecting coordinates on webpages, (5) whether the instructions are speech instructions, and (6) the number of total instructions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 52, + 264, + 162, + 329 + ], + "blocks": [ + { + "bbox": [ + 59, + 251, + 156, + 262 + ], + "lines": [ + { + "bbox": [ + 59, + 251, + 156, + 262 + ], + "spans": [ + { + "bbox": [ + 59, + 251, + 156, + 262 + ], + "type": "text", + "content": "\"Ok, buy this item\"" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 264, + 162, + 329 + ], + "lines": [ + { + "bbox": [ + 52, + 264, + 162, + 329 + ], + "spans": [ + { + "bbox": [ + 52, + 264, + 162, + 329 + ], + "type": "image", + "image_path": "3262cf9379b85a21d2a4c5c9a55cea3280c0ffedb92bc61498969f0ae3157c4a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 168, + 264, + 277, + 329 + ], + "blocks": [ + { + "bbox": [ + 169, + 251, + 279, + 263 + ], + "lines": [ + { + "bbox": [ + 169, + 251, + 279, + 263 + ], + "spans": [ + { + "bbox": [ + 169, + 251, + 279, + 263 + ], + "type": "text", + "content": "\"Let's do All Airports\"" + } + ] + } + ], + "index": 4, + 
"angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 168, + 264, + 277, + 329 + ], + "lines": [ + { + "bbox": [ + 168, + 264, + 277, + 329 + ], + "spans": [ + { + "bbox": [ + 168, + 264, + 277, + 329 + ], + "type": "image", + "image_path": "5e15a46f4526020f333c8caa9d15eeec63cb8f6981f624f2ab01f59fc343101e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 338, + 293, + 373 + ], + "lines": [ + { + "bbox": [ + 50, + 338, + 293, + 373 + ], + "spans": [ + { + "bbox": [ + 50, + 338, + 293, + 373 + ], + "type": "text", + "content": "Figure 3: Multiple actions can satisfy a user's intent. A web agent's action is considered correct if the coordinate they provide is within one of the annotated correct regions." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 394, + 293, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 394, + 293, + 440 + ], + "spans": [ + { + "bbox": [ + 50, + 394, + 293, + 440 + ], + "type": "text", + "content": "They have only been trained on low-level instructions that clearly refer to web elements. It remains unclear if they can understand real-world user instructions that must be interpreted considering context or may refer to high-level goals." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 101, + 451, + 242, + 463 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 451, + 242, + 463 + ], + "spans": [ + { + "bbox": [ + 101, + 451, + 242, + 463 + ], + "type": "text", + "content": "RealWebAssist Benchmark" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 468, + 124, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 468, + 124, + 480 + ], + "spans": [ + { + "bbox": [ + 51, + 468, + 124, + 480 + ], + "type": "text", + "content": "Problem Setup" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 483, + 292, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 483, + 292, + 582 + ], + "spans": [ + { + "bbox": [ + 50, + 483, + 292, + 582 + ], + "type": "text", + "content": "RealWebAssist evaluates agents' ability to follow long-horizon, sequential web instructions to assist users with their high-level goals. In each task, a human user will try to reach an open-ended goal such as \"buy formal outfits for a formal event\" by instructing the assistant through a series of spoken instructions. The dataset is collected from interactions between human users and human assistants in a human experiment. To evaluate agents, we use the human assistants' actions to evaluate the agents' success." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 583, + 293, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 583, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 583, + 293, + 704 + ], + "type": "text", + "content": "In RealWebAssist, a web agent has access to the current instruction, webpage (as a screenshot), and all the past interactions (previous instructions & screenshots of webpages). Since we are focusing on tasks on real-world websites, it is challenging to ensure safety and reproducibility in an interactive evaluation setting. 
Therefore, we adopt an offline evaluation setting following prior web-based agent benchmarks with real websites (Deng et al. 2024; Cheng et al. 2024). Specifically, for each instruction collected from the human experiment, the agent needs to identify the correct element to interact with by providing a coordinate or a bound" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 249, + 558, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 249, + 558, + 316 + ], + "spans": [ + { + "bbox": [ + 315, + 249, + 558, + 316 + ], + "type": "text", + "content": "ing box to click on the webpage. As shown by figure 3, a web agent's action is considered correct if the coordinate or the center of the bounding box they provide falls in the annotated correct regions on the webpage. If there are multiple steps corresponding to one instruction, we evaluate if the web agent's actions for the same instruction are all correct." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 323, + 409, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 323, + 409, + 335 + ], + "spans": [ + { + "bbox": [ + 317, + 323, + 409, + 335 + ], + "type": "text", + "content": "Evaluation Metrics" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 337, + 502, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 337, + 502, + 349 + ], + "spans": [ + { + "bbox": [ + 317, + 337, + 502, + 349 + ], + "type": "text", + "content": "We consider the following evaluation metrics:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 351, + 558, + 460 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 320, + 351, + 558, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 351, + 558, + 373 + ], + "spans": [ + { + "bbox": [ + 320, + 351, + 558, + 373 + ], + "type": "text", + "content": "- Task success rate: A task is successful if the web 
agent can correctly produce actions for all instructions in a task." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 321, + 374, + 558, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 374, + 558, + 407 + ], + "spans": [ + { + "bbox": [ + 321, + 374, + 558, + 407 + ], + "type": "text", + "content": "- Average progress: We measure the progress of a task by the percentage of consecutive instructions the web agent can successfully perform before its first error in the task." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 321, + 407, + 558, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 407, + 558, + 460 + ], + "spans": [ + { + "bbox": [ + 321, + 407, + 558, + 460 + ], + "type": "text", + "content": "- Step success rate: We also consider a teacher forcing setting as a simpler, diagnostic evaluation, where the web agent will only need to follow the instruction at a single step of a task assuming all previous instructions have been successfully performed." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 316, + 470, + 419, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 470, + 419, + 481 + ], + "spans": [ + { + "bbox": [ + 316, + 470, + 419, + 481 + ], + "type": "text", + "content": "Dataset Construction" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 484, + 558, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 484, + 558, + 625 + ], + "spans": [ + { + "bbox": [ + 315, + 484, + 558, + 625 + ], + "type": "text", + "content": "Setup. We recruited 10 participants (4 female, 6 male, mean age = 20 years) from a US university campus, none of whom had prior knowledge of the study's purpose, to construct the dataset. All participants were native or fluent English speakers. 
Each participant completed a 40-minute real-world web assistance session in which they tackled a series of open-ended tasks designed to encourage diverse strategies. During each session, participants verbally instructed an experimenter, who operated the computer on their behalf, to complete the tasks. We captured screen recordings and used a high-quality USB microphone to record speech as raw data. The user study was approved by an institutional review board." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 627, + 558, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 627, + 558, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 627, + 558, + 704 + ], + "type": "text", + "content": "User Tasks. To increase the instruction diversity and realism, participants received general web-based tasks requiring active information seeking, sub-goal planning, and comparison among various options. We generated the task list by few-shot prompting GPT-4o with open-ended tasks, followed by manual filtering and editing to ensure task quality and feasibility. These tasks provide only general guidance," + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 54, + 293, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 54, + 293, + 99 + ], + "spans": [ + { + "bbox": [ + 51, + 54, + 293, + 99 + ], + "type": "text", + "content": "ensuring flexibility for personal decision-making. Example tasks include \"Purchase an outfit for a formal event\" and \"Plan a 5-day trip to Japan, booking both flights and hotels\". Each user finishes about 10 tasks." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 99, + 293, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 99, + 293, + 153 + ], + "spans": [ + { + "bbox": [ + 51, + 99, + 293, + 153 + ], + "type": "text", + "content": "Emergent User Behavior. In our realistic, open-ended settings, users exhibit rich behaviors that are not present in previous benchmarks. These include, but are not limited to, information seeking, researching and comparing different options, change of mind, and trial-and-error." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 153, + 293, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 153, + 293, + 384 + ], + "spans": [ + { + "bbox": [ + 51, + 153, + 293, + 384 + ], + "type": "text", + "content": "Annotations. We manually labeled RealWebAssist data to ensure high-quality annotations. We first segmented the full recording into individual clips corresponding to each user's instructions. In our benchmark, we disregard user speech unrelated to explicit instructions for the assistant, such as filler words or verbalized thought processes. For each instruction, we provide raw speech, speech transcript, webpage, and the correct regions to click (in the form of one or more bounding boxes). When there were multiple correct answers for the instructions (for instance, \"can you close all the current tabs\"), we annotated all correct regions with multiple bounding boxes. When the experimenter made a mistake during the data collection sessions, we annotated the correct action intended by the user. If an instruction required multiple steps to complete, we set the instruction at each step as the same instruction. To generate the text instructions, we used an off-the-shelf recognition model, Whisper Large-V3 (Radford et al. 2023), to transcribe users' speech and then manually fixed transcription errors. 
For all the instructions, we have three annotators verifying all of them, ensuring " + }, + { + "bbox": [ + 51, + 153, + 293, + 384 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 51, + 153, + 293, + 384 + ], + "type": "text", + "content": " agreement." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 384, + 293, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 384, + 293, + 429 + ], + "spans": [ + { + "bbox": [ + 51, + 384, + 293, + 429 + ], + "type": "text", + "content": "Dataset Statistics. RealWebAssist contains 1,885 user instructions across 107 tasks, 66 websites, and 2,524 screenshots. In addition to the benchmark, we also plan to release the raw data, consisting of over 6 hours of video & audio." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 436, + 127, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 436, + 127, + 449 + ], + "spans": [ + { + "bbox": [ + 52, + 436, + 127, + 449 + ], + "type": "text", + "content": "Key Challenges" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 451, + 293, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 451, + 293, + 616 + ], + "spans": [ + { + "bbox": [ + 51, + 451, + 293, + 616 + ], + "type": "text", + "content": "RealWebAssist features multiple challenges as illustrated in Figure 4, including spatial and temporal reasoning needed to understand ambiguous and context-dependent user instructions, planning for multiple steps of actions to reach the goal communicated by an instruction, and learning about user-specific routines. These key challenges provide a more realistic and holistic evaluation of a web agent's reasoning, planning, and learning abilities to assist real-world users on long-horizon tasks. 
It is worth noting that many of these challenges, in particular, spatial reasoning, temporal reasoning, and routine understanding, are not present in existing web agent benchmarks. Unlike RealWebAssist, prior benchmarks, such as ScreenSpot (Cheng et al. 2024), WebArena (Zhou et al. 2023), and Mind2Web (Deng et al. 2024), only include clear, unambiguous, and non-sequential instructions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 616, + 293, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 616, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 616, + 293, + 704 + ], + "type": "text", + "content": "Spatial Reasoning. When referring to one of the elements on a webpage, real-world users tend to use a concise instruction that can be understood conditioned on spatial context instead of an overly elaborated instruction. For instance, when instructing an assistant to buy a product, users may give short instructions such as \"select the cheapest one,\" instead of describing the desired product in detail. Figure 4A depicts different types of spatial reasoning that rely on di" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 55, + 558, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 55, + 558, + 120 + ], + "spans": [ + { + "bbox": [ + 316, + 55, + 558, + 120 + ], + "type": "text", + "content": "verse spatial contexts, including ranking, spatial relations, and overall website functionalities. It is worth noting that these instructions may sometimes reveal users' preferences (e.g., preferred seating), providing additional information for the web agent to provide potentially more customized assistance in the future." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 135, + 558, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 135, + 558, + 334 + ], + "spans": [ + { + "bbox": [ + 316, + 135, + 558, + 334 + ], + "type": "text", + "content": "Temporal Reasoning. In our sequential instruction following tasks, users may instruct an assistant with the history as an assumed temporal context. For example, to understand the intended meaning of \"click the last item,\" the assistant must memorize the items the user has viewed in the past. Figure 4B shows temporal reasoning based on different kinds of temporal context, ranging from short context between two consecutive webpages to long context with the same website to long context across websites. From the temporal context, the assistant needs to memorize crucial elements in the previous webpages, infer and track a user's mind (e.g., change of mind about what to buy) based on the past instructions and webpages, and identify the earlier webpage the user refers to. Such temporal reasoning has not been evaluated in prior web agent benchmarks. However, it is very common in our benchmark due to the nature of human web browsing behavior as well as human instructions guided by pragmatics (Goodman and Frank 2016)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 348, + 559, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 348, + 559, + 469 + ], + "spans": [ + { + "bbox": [ + 316, + 348, + 559, + 469 + ], + "type": "text", + "content": "Multi-step Planning. Many instructions require multiple steps to complete. In these cases, the assistant needs to interpret the goal implied by the instruction and plan a sequence of actions to achieve that goal. This goes beyond grounding the instruction to a single action on the current webpage. 
Figure 4C shows an example where the agent was asked to repeat the same order on another food delivery website to check if the price would be different. A successful execution of this instruction would require the agent to first understand what the order is to ground the goal on the current website and generate a successful multi-step plan." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 484, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 484, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 316, + 484, + 559, + 704 + ], + "type": "text", + "content": "Routine. Since our benchmark allows a user to engage in repeated interactions with an assistant over multiple tasks, we observe that users may define routines understood by the assistant after repeated interactions. As shown in Figure 4D, the user initially gave detailed step-by-step instructions when selecting arrival and departure dates for a flight. In a subsequent task, however, the user simplified them into a single instruction when selecting dates for a hotel room. Such shorter instructions become possible after establishing a routine in the earlier task. Cognitive studies found that procedural abstraction, like these routines, naturally emerges in human cooperative communication through repeated interactions, allowing more efficient communication with partners (McCarthy et al. 2021). The emergence of such routines in our benchmark poses a novel challenge for web agents—learning user-specific procedural abstraction via repeated interactions to achieve human-like adaptive assistance. We hypothesize that this ability could enhance users' perception of the AI assistant, as it understands human cooperative communication." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 58, + 197, + 76 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 58, + 197, + 76 + ], + "spans": [ + { + "bbox": [ + 61, + 58, + 197, + 76 + ], + "type": "text", + "content": "A Spatial Reasoning" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 110, + 143, + 157 + ], + "blocks": [ + { + "bbox": [ + 79, + 93, + 129, + 107 + ], + "lines": [ + { + "bbox": [ + 79, + 93, + 129, + 107 + ], + "spans": [ + { + "bbox": [ + 79, + 93, + 129, + 107 + ], + "type": "text", + "content": "\"Can you click on the seventh tab?\"" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 69, + 110, + 143, + 157 + ], + "lines": [ + { + "bbox": [ + 69, + 110, + 143, + 157 + ], + "spans": [ + { + "bbox": [ + 69, + 110, + 143, + 157 + ], + "type": "image", + "image_path": "ba5e71d4116049f60b5bc9cd16dfe9a18244ef18f0bc79b68f33743507ec46fb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 159, + 110, + 246, + 157 + ], + "blocks": [ + { + "bbox": [ + 131, + 77, + 168, + 89 + ], + "lines": [ + { + "bbox": [ + 131, + 77, + 168, + 89 + ], + "spans": [ + { + "bbox": [ + 131, + 77, + 168, + 89 + ], + "type": "text", + "content": "Ranking" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 170, + 93, + 231, + 108 + ], + "lines": [ + { + "bbox": [ + 170, + 93, + 231, + 108 + ], + "spans": [ + { + "bbox": [ + 170, + 93, + 231, + 108 + ], + "type": "text", + "content": "\"And let's just get the lowest price tickets\"" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 159, + 110, + 246, + 157 + ], + "lines": [ + { + "bbox": [ + 159, + 110, + 246, + 157 + ], + "spans": [ + { + 
"bbox": [ + 159, + 110, + 246, + 157 + ], + "type": "image", + "image_path": "2eead0b82df55cb899f35efdc9503c8c8ebdfc8a7ecc837bd3f5b544ab79cf9d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 264, + 110, + 345, + 156 + ], + "blocks": [ + { + "bbox": [ + 323, + 79, + 397, + 90 + ], + "lines": [ + { + "bbox": [ + 323, + 79, + 397, + 90 + ], + "spans": [ + { + "bbox": [ + 323, + 79, + 397, + 90 + ], + "type": "text", + "content": "Spatial relations" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 269, + 92, + 339, + 107 + ], + "lines": [ + { + "bbox": [ + 269, + 92, + 339, + 107 + ], + "spans": [ + { + "bbox": [ + 269, + 92, + 339, + 107 + ], + "type": "text", + "content": "\"Can you click the arrow between the two\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 264, + 110, + 345, + 156 + ], + "lines": [ + { + "bbox": [ + 264, + 110, + 345, + 156 + ], + "spans": [ + { + "bbox": [ + 264, + 110, + 345, + 156 + ], + "type": "image", + "image_path": "7a41bced840724c2c6501da63e165f94557a7e0f02f89d9734f7562716e63e71.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 361, + 110, + 442, + 156 + ], + "blocks": [ + { + "bbox": [ + 374, + 92, + 430, + 107 + ], + "lines": [ + { + "bbox": [ + 374, + 92, + 430, + 107 + ], + "spans": [ + { + "bbox": [ + 374, + 92, + 430, + 107 + ], + "type": "text", + "content": "Only select the two seats on the top" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 361, + 110, + 442, + 156 + ], + "lines": [ + { + "bbox": [ + 361, + 110, + 442, + 156 + ], + "spans": [ + { + "bbox": [ + 361, + 110, + 442, + 156 + ], + "type": "image", + "image_path": "d855e2cf8493411bd282771d1d34a5bab6d962d3dd3397a281310a818ed5544c.jpg" + } + ] + } + ], + 
"index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 462, + 109, + 539, + 156 + ], + "blocks": [ + { + "bbox": [ + 459, + 78, + 540, + 88 + ], + "lines": [ + { + "bbox": [ + 459, + 78, + 540, + 88 + ], + "spans": [ + { + "bbox": [ + 459, + 78, + 540, + 88 + ], + "type": "text", + "content": "Website functions" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 470, + 93, + 531, + 107 + ], + "lines": [ + { + "bbox": [ + 470, + 93, + 531, + 107 + ], + "spans": [ + { + "bbox": [ + 470, + 93, + 531, + 107 + ], + "type": "text", + "content": "\"Change the end date from 20 to 22nd\"" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 462, + 109, + 539, + 156 + ], + "lines": [ + { + "bbox": [ + 462, + 109, + 539, + 156 + ], + "spans": [ + { + "bbox": [ + 462, + 109, + 539, + 156 + ], + "type": "image", + "image_path": "23a81543b1cddff082f6d87c6f256bc893c2a538cd4253b22058339db2d13e45.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 61, + 160, + 212, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 160, + 212, + 178 + ], + "spans": [ + { + "bbox": [ + 61, + 160, + 212, + 178 + ], + "type": "text", + "content": "B Temporal Reasoning" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 66, + 204, + 143, + 253 + ], + "blocks": [ + { + "bbox": [ + 110, + 179, + 192, + 191 + ], + "lines": [ + { + "bbox": [ + 110, + 179, + 192, + 191 + ], + "spans": [ + { + "bbox": [ + 110, + 179, + 192, + 191 + ], + "type": "text", + "content": "Previous webpage" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 74, + 194, + 141, + 202 + ], + "lines": [ + { + "bbox": [ + 74, + 194, + 141, + 202 + ], + "spans": [ + { + "bbox": [ + 74, + 194, + 141, + 202 + ], + "type": "text", + 
"content": "\"Goto the previous tab\"" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 66, + 204, + 143, + 253 + ], + "lines": [ + { + "bbox": [ + 66, + 204, + 143, + 253 + ], + "spans": [ + { + "bbox": [ + 66, + 204, + 143, + 253 + ], + "type": "image", + "image_path": "63902a9f6317abf275642953e8b40809c46f12a3b365a3c43d271fe3a613ffd9.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 159, + 204, + 241, + 251 + ], + "blocks": [ + { + "bbox": [ + 170, + 194, + 236, + 202 + ], + "lines": [ + { + "bbox": [ + 170, + 194, + 236, + 202 + ], + "spans": [ + { + "bbox": [ + 170, + 194, + 236, + 202 + ], + "type": "text", + "content": "\"No, stay on that page\"" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 159, + 204, + 241, + 251 + ], + "lines": [ + { + "bbox": [ + 159, + 204, + 241, + 251 + ], + "spans": [ + { + "bbox": [ + 159, + 204, + 241, + 251 + ], + "type": "image", + "image_path": "cb5d8e54ab382218c82963c419815a5545b40036eb8a04989b805fb5a8943dca.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 260, + 203, + 340, + 253 + ], + "blocks": [ + { + "bbox": [ + 311, + 179, + 478, + 191 + ], + "lines": [ + { + "bbox": [ + 311, + 179, + 478, + 191 + ], + "spans": [ + { + "bbox": [ + 311, + 179, + 478, + 191 + ], + "type": "text", + "content": "Long context within the same website" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 272, + 194, + 329, + 201 + ], + "lines": [ + { + "bbox": [ + 272, + 194, + 329, + 201 + ], + "spans": [ + { + "bbox": [ + 272, + 194, + 329, + 201 + ], + "type": "text", + "content": "\"Click on HP laptop\"" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 260, + 203, + 340, + 253 + ], + 
"lines": [ + { + "bbox": [ + 260, + 203, + 340, + 253 + ], + "spans": [ + { + "bbox": [ + 260, + 203, + 340, + 253 + ], + "type": "image", + "image_path": "9a13fc77395a1b436583c1dc8a2907791f0c87b3cee6c2fd0a7775d0ee8bcce6.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 255, + 386, + 266 + ], + "lines": [ + { + "bbox": [ + 217, + 255, + 386, + 266 + ], + "spans": [ + { + "bbox": [ + 217, + 255, + 386, + 266 + ], + "type": "text", + "content": "Long context across multiple websites" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 365, + 203, + 442, + 252 + ], + "blocks": [ + { + "bbox": [ + 370, + 194, + 436, + 202 + ], + "lines": [ + { + "bbox": [ + 370, + 194, + 436, + 202 + ], + "spans": [ + { + "bbox": [ + 370, + 194, + 436, + 202 + ], + "type": "text", + "content": "\"Can you check ASUS?\"" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 365, + 203, + 442, + 252 + ], + "lines": [ + { + "bbox": [ + 365, + 203, + 442, + 252 + ], + "spans": [ + { + "bbox": [ + 365, + 203, + 442, + 252 + ], + "type": "image", + "image_path": "3a00d16673abd9bdbb7dc446ffd3e9a5cef52dfef2decba6e9b846a3e3006f18.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 471, + 203, + 542, + 252 + ], + "blocks": [ + { + "bbox": [ + 463, + 194, + 545, + 202 + ], + "lines": [ + { + "bbox": [ + 463, + 194, + 545, + 202 + ], + "spans": [ + { + "bbox": [ + 463, + 194, + 545, + 202 + ], + "type": "text", + "content": "\"Go back to the other laptop\"" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 471, + 203, + 542, + 252 + ], + "lines": [ + { + "bbox": [ + 471, + 203, + 542, + 252 + ], + "spans": [ + { + "bbox": [ + 471, + 203, + 542, + 252 + ], + "type": "image", + "image_path": 
"8f9ec67c1254812380c80b3c73fa1c66e3a246c33c0fee2617f7b7621f8fb749.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 64, + 286, + 143, + 334 + ], + "blocks": [ + { + "bbox": [ + 73, + 269, + 130, + 283 + ], + "lines": [ + { + "bbox": [ + 73, + 269, + 130, + 283 + ], + "spans": [ + { + "bbox": [ + 73, + 269, + 130, + 283 + ], + "type": "text", + "content": "\"Can you look at the next tab as well?\"" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 64, + 286, + 143, + 334 + ], + "lines": [ + { + "bbox": [ + 64, + 286, + 143, + 334 + ], + "spans": [ + { + "bbox": [ + 64, + 286, + 143, + 334 + ], + "type": "image", + "image_path": "62cf7409464e0df9224fbc514bcfada2521f09cde0a5801f98f118c975b7f24f.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 96, + 335, + 110, + 347 + ], + "blocks": [ + { + "bbox": [ + 96, + 335, + 110, + 347 + ], + "lines": [ + { + "bbox": [ + 96, + 335, + 110, + 347 + ], + "spans": [ + { + "bbox": [ + 96, + 335, + 110, + 347 + ], + "type": "image", + "image_path": "e47eaaa5ae2da59bed97e795c4703cf3c7e3ddd2ecd60129652f86f4bc7e9674.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 143, + 286, + 264, + 334 + ], + "blocks": [ + { + "bbox": [ + 164, + 269, + 246, + 284 + ], + "lines": [ + { + "bbox": [ + 164, + 269, + 246, + 284 + ], + "spans": [ + { + "bbox": [ + 164, + 269, + 246, + 284 + ], + "type": "text", + "content": "\"Oh, this is like 95 bucks. 
Can you press the other tab\"" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 143, + 286, + 264, + 334 + ], + "lines": [ + { + "bbox": [ + 143, + 286, + 264, + 334 + ], + "spans": [ + { + "bbox": [ + 143, + 286, + 264, + 334 + ], + "type": "image", + "image_path": "304fb0a3a410fe3d67552f171bcbdaf6ecdaff35d16ea1f23c4ff48f91fa6670.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 182, + 335, + 223, + 346 + ], + "lines": [ + { + "bbox": [ + 182, + 335, + 223, + 346 + ], + "spans": [ + { + "bbox": [ + 182, + 335, + 223, + 346 + ], + "type": "text", + "content": "CityPASS" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 263, + 286, + 342, + 334 + ], + "blocks": [ + { + "bbox": [ + 266, + 269, + 335, + 284 + ], + "lines": [ + { + "bbox": [ + 266, + 269, + 335, + 284 + ], + "spans": [ + { + "bbox": [ + 266, + 269, + 335, + 284 + ], + "type": "text", + "content": "\"OK, can you open a new tab and search for ...\"" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 263, + 286, + 342, + 334 + ], + "lines": [ + { + "bbox": [ + 263, + 286, + 342, + 334 + ], + "spans": [ + { + "bbox": [ + 263, + 286, + 342, + 334 + ], + "type": "image", + "image_path": "201d9e2f83e91b7ffbe99cf13a4b2f44ec488e6b23d6d455f891891130e95907.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 297, + 335, + 310, + 348 + ], + "blocks": [ + { + "bbox": [ + 297, + 335, + 310, + 348 + ], + "lines": [ + { + "bbox": [ + 297, + 335, + 310, + 348 + ], + "spans": [ + { + "bbox": [ + 297, + 335, + 310, + 348 + ], + "type": "image", + "image_path": "8546b193f6d0948328afa144cb4d9752a6d699f8d9b30d3fcd79c63fc862dbcf.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 
36 + }, + { + "type": "image", + "bbox": [ + 342, + 286, + 443, + 334 + ], + "blocks": [ + { + "bbox": [ + 361, + 269, + 446, + 283 + ], + "lines": [ + { + "bbox": [ + 361, + 269, + 446, + 283 + ], + "spans": [ + { + "bbox": [ + 361, + 269, + 446, + 283 + ], + "type": "text", + "content": "\"This is 36. Can you go back to CN Tower's official website\"" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 342, + 286, + 443, + 334 + ], + "lines": [ + { + "bbox": [ + 342, + 286, + 443, + 334 + ], + "spans": [ + { + "bbox": [ + 342, + 286, + 443, + 334 + ], + "type": "image", + "image_path": "1eed3cbba66d7499656e171b12493d30247b2ce027294fbe4eca315736ec9e3a.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 396, + 335, + 413, + 350 + ], + "blocks": [ + { + "bbox": [ + 396, + 335, + 413, + 350 + ], + "lines": [ + { + "bbox": [ + 396, + 335, + 413, + 350 + ], + "spans": [ + { + "bbox": [ + 396, + 335, + 413, + 350 + ], + "type": "image", + "image_path": "01d4f6a679a498b0e3393f2d373eb46be9fc4e2c1ef4a99929fdd0f7130bbbfd.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 468, + 286, + 542, + 334 + ], + "blocks": [ + { + "bbox": [ + 475, + 269, + 533, + 284 + ], + "lines": [ + { + "bbox": [ + 475, + 269, + 533, + 284 + ], + "spans": [ + { + "bbox": [ + 475, + 269, + 533, + 284 + ], + "type": "text", + "content": "\"I'd probably get the city pass option\"" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 468, + 286, + 542, + 334 + ], + "lines": [ + { + "bbox": [ + 468, + 286, + 542, + 334 + ], + "spans": [ + { + "bbox": [ + 468, + 286, + 542, + 334 + ], + "type": "image", + "image_path": "723238ade76f571ce35b12a7b4568326c588d00e3cdcc48105287acc73f8c99e.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 500, + 335, + 514, + 349 + ], + "blocks": [ + { + "bbox": [ + 500, + 335, + 514, + 349 + ], + "lines": [ + { + "bbox": [ + 500, + 335, + 514, + 349 + ], + "spans": [ + { + "bbox": [ + 500, + 335, + 514, + 349 + ], + "type": "image", + "image_path": "1adfabb0cb91990ce5bddb0cf1cc7badf04e77434e247af1898187b45eb832b4.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "bbox": [ + 61, + 355, + 210, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 355, + 210, + 371 + ], + "spans": [ + { + "bbox": [ + 61, + 355, + 210, + 371 + ], + "type": "text", + "content": "C Multi-step planning" + } + ] + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 132, + 388, + 212, + 429 + ], + "blocks": [ + { + "bbox": [ + 133, + 372, + 239, + 387 + ], + "lines": [ + { + "bbox": [ + 133, + 372, + 239, + 387 + ], + "spans": [ + { + "bbox": [ + 133, + 372, + 239, + 387 + ], + "type": "text", + "content": "\"Can you go to DoorDash and order the same thing to compare the price?\"" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 59, + 390, + 131, + 427 + ], + "lines": [ + { + "bbox": [ + 59, + 390, + 131, + 427 + ], + "spans": [ + { + "bbox": [ + 59, + 390, + 131, + 427 + ], + "type": "text", + "content": "History (not shown here): The user previously ordered Snooze melt from Meltdown and selected French Fries" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 388, + 212, + 429 + ], + "lines": [ + { + "bbox": [ + 132, + 388, + 212, + 429 + ], + "spans": [ + { + "bbox": [ + 132, + 388, + 212, + 429 + ], + "type": "image", + "image_path": "866645a1921b87066ab8f5f51fe034ee26f062d6eabfb1a6d939229e7b2578e6.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", 
+ "bbox": [ + 232, + 391, + 299, + 430 + ], + "blocks": [ + { + "bbox": [ + 232, + 391, + 299, + 430 + ], + "lines": [ + { + "bbox": [ + 232, + 391, + 299, + 430 + ], + "spans": [ + { + "bbox": [ + 232, + 391, + 299, + 430 + ], + "type": "image", + "image_path": "4f1154756e2c1ea4c745646e35c604eedd93c1b1fcccdb866bdd547aa94e8897.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 325, + 388, + 385, + 430 + ], + "blocks": [ + { + "bbox": [ + 325, + 388, + 385, + 430 + ], + "lines": [ + { + "bbox": [ + 325, + 388, + 385, + 430 + ], + "spans": [ + { + "bbox": [ + 325, + 388, + 385, + 430 + ], + "type": "image", + "image_path": "c76f097e0d7a8be85904af9e0ba26cc8688b241d6345fd0e81205598c4cfec7a.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + } + ], + "index": 48 + }, + { + "type": "image", + "bbox": [ + 406, + 388, + 462, + 429 + ], + "blocks": [ + { + "bbox": [ + 406, + 388, + 462, + 429 + ], + "lines": [ + { + "bbox": [ + 406, + 388, + 462, + 429 + ], + "spans": [ + { + "bbox": [ + 406, + 388, + 462, + 429 + ], + "type": "image", + "image_path": "0bda5b47032a5ddbecdab3f3d2d1a16c54e28d78bceb97b1b655b1b06d0f4b3a.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 482, + 388, + 541, + 430 + ], + "blocks": [ + { + "bbox": [ + 482, + 388, + 541, + 430 + ], + "lines": [ + { + "bbox": [ + 482, + 388, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 482, + 388, + 541, + 430 + ], + "type": "image", + "image_path": "b44e9d6483d35381371556f2f7874777315f6c1b282d7f8ece0c0bb172957c07.jpg" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_body" + } + ], + "index": 50 + }, + { + "bbox": [ + 62, + 437, + 132, + 450 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 437, + 132, + 450 + ], + "spans": [ + { + "bbox": [ + 62, + 437, + 132, + 450 + ], + 
"type": "text", + "content": "D Routine" + } + ] + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 135, + 447, + 208, + 494 + ], + "blocks": [ + { + "bbox": [ + 135, + 437, + 208, + 445 + ], + "lines": [ + { + "bbox": [ + 135, + 437, + 208, + 445 + ], + "spans": [ + { + "bbox": [ + 135, + 437, + 208, + 445 + ], + "type": "text", + "content": "\"Can we go to the dates?\"" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 135, + 447, + 208, + 494 + ], + "lines": [ + { + "bbox": [ + 135, + 447, + 208, + 494 + ], + "spans": [ + { + "bbox": [ + 135, + 447, + 208, + 494 + ], + "type": "image", + "image_path": "13d42bd21ef1b84253b992a925229d3a9601fb7eb54415e8caeb8b799ee8b7b6.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 155, + 495, + 239, + 502 + ], + "lines": [ + { + "bbox": [ + 155, + 495, + 239, + 502 + ], + "spans": [ + { + "bbox": [ + 155, + 495, + 239, + 502 + ], + "type": "text", + "content": "\"And for dates do 3.17 to 3.21\"" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 464, + 123, + 489 + ], + "lines": [ + { + "bbox": [ + 67, + 464, + 123, + 489 + ], + "spans": [ + { + "bbox": [ + 67, + 464, + 123, + 489 + ], + "type": "text", + "content": "Earlier task: select dates for a round-trip flight" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 512, + 132, + 528 + ], + "lines": [ + { + "bbox": [ + 67, + 512, + 132, + 528 + ], + "spans": [ + { + "bbox": [ + 67, + 512, + 132, + 528 + ], + "type": "text", + "content": "Later task: select dates for a hotel stay" + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 244, + 447, + 312, + 493 + ], + "blocks": [ + { + "bbox": [ + 242, + 437, + 314, + 445 + ], + "lines": [ + { + "bbox": [ + 242, + 437, + 314, + 445 + ], + 
"spans": [ + { + "bbox": [ + 242, + 437, + 314, + 445 + ], + "type": "text", + "content": "\"Can we select April 7th?\"" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 244, + 447, + 312, + 493 + ], + "lines": [ + { + "bbox": [ + 244, + 447, + 312, + 493 + ], + "spans": [ + { + "bbox": [ + 244, + 447, + 312, + 493 + ], + "type": "image", + "image_path": "b930b1c1eb8244d7e57f1f9e3f48151543e6440e4c9c22d1b7738667ce567590.jpg" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_body" + } + ], + "index": 56 + }, + { + "type": "image", + "bbox": [ + 359, + 446, + 428, + 493 + ], + "blocks": [ + { + "bbox": [ + 362, + 437, + 422, + 444 + ], + "lines": [ + { + "bbox": [ + 362, + 437, + 422, + 444 + ], + "spans": [ + { + "bbox": [ + 362, + 437, + 422, + 444 + ], + "type": "text", + "content": "\"And then April 14th\"" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 359, + 446, + 428, + 493 + ], + "lines": [ + { + "bbox": [ + 359, + 446, + 428, + 493 + ], + "spans": [ + { + "bbox": [ + 359, + 446, + 428, + 493 + ], + "type": "image", + "image_path": "8022060c99b56e9562de56293f5888d82c66f54c2b561b33fb7e638919355843.jpg" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_body" + } + ], + "index": 58 + }, + { + "type": "image", + "bbox": [ + 472, + 445, + 545, + 493 + ], + "blocks": [ + { + "bbox": [ + 485, + 437, + 528, + 444 + ], + "lines": [ + { + "bbox": [ + 485, + 437, + 528, + 444 + ], + "spans": [ + { + "bbox": [ + 485, + 437, + 528, + 444 + ], + "type": "text", + "content": "\"And hit done\"" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 472, + 445, + 545, + 493 + ], + "lines": [ + { + "bbox": [ + 472, + 445, + 545, + 493 + ], + "spans": [ + { + "bbox": [ + 472, + 445, + 545, + 493 + ], + "type": "image", + "image_path": "9f7d7479bdb7dfe1cbc2f6af2434f79d35b23ee13e316a307b48b24a38f0979c.jpg" + } + ] + } + ], 
+ "index": 60, + "angle": 0, + "type": "image_body" + } + ], + "index": 60 + }, + { + "type": "image", + "bbox": [ + 158, + 503, + 230, + 548 + ], + "blocks": [ + { + "bbox": [ + 158, + 503, + 230, + 548 + ], + "lines": [ + { + "bbox": [ + 158, + 503, + 230, + 548 + ], + "spans": [ + { + "bbox": [ + 158, + 503, + 230, + 548 + ], + "type": "image", + "image_path": "3582dddae1e60403cc5a6b850ad94c9bf2fadd65918f9cdaa0af5f00f8e90161.jpg" + } + ] + } + ], + "index": 63, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 561, + 558, + 585 + ], + "lines": [ + { + "bbox": [ + 50, + 561, + 558, + 585 + ], + "spans": [ + { + "bbox": [ + 50, + 561, + 558, + 585 + ], + "type": "text", + "content": "Figure 4: Key challenges introduced by RealWebAssist: (A) spatial reasoning, (B) temporal reasoning, (C) multi-step planning, and (D) learning user-specific routines." + } + ] + } + ], + "index": 67, + "angle": 0, + "type": "image_caption" + } + ], + "index": 63 + }, + { + "bbox": [ + 156, + 495, + 239, + 502 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 495, + 239, + 502 + ], + "spans": [ + { + "bbox": [ + 156, + 495, + 239, + 502 + ], + "type": "text", + "content": "\"And for dates do 3.17 to 3.21\"" + } + ] + } + ], + "index": 64, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 308, + 502, + 381, + 547 + ], + "blocks": [ + { + "bbox": [ + 308, + 502, + 381, + 547 + ], + "lines": [ + { + "bbox": [ + 308, + 502, + 381, + 547 + ], + "spans": [ + { + "bbox": [ + 308, + 502, + 381, + 547 + ], + "type": "image", + "image_path": "0ecfc95aeb49af0705291a27e30780180994aefb82a293996b90508495262b86.jpg" + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_body" + } + ], + "index": 65 + }, + { + "type": "image", + "bbox": [ + 458, + 502, + 533, + 547 + ], + "blocks": [ + { + "bbox": [ + 458, + 502, + 533, + 547 + ], + "lines": [ + { + "bbox": [ + 458, + 502, + 533, + 547 + ], + "spans": [ + { + "bbox": [ + 458, + 502, + 533, + 547 + ], + "type": 
"image", + "image_path": "9c19821c5df1f6bd8f461904cde24337092636ed53ed5bafbef20c8cbd7b1197.jpg" + } + ] + } + ], + "index": 66, + "angle": 0, + "type": "image_body" + } + ], + "index": 66 + }, + { + "bbox": [ + 138, + 605, + 206, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 605, + 206, + 618 + ], + "spans": [ + { + "bbox": [ + 138, + 605, + 206, + 618 + ], + "type": "text", + "content": "Experiments" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 51, + 621, + 97, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 621, + 97, + 632 + ], + "spans": [ + { + "bbox": [ + 51, + 621, + 97, + 632 + ], + "type": "text", + "content": "Baselines" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 50, + 637, + 293, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 637, + 293, + 682 + ], + "spans": [ + { + "bbox": [ + 50, + 637, + 293, + 682 + ], + "type": "text", + "content": "We evaluated several types of models for web agents commonly evaluated in existing web agent benchmarks that have real-world websites (i.e., offline evaluation). For all the experiments, we use the ground-truth captions for instructions." + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 51, + 682, + 293, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 682, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 51, + 682, + 293, + 704 + ], + "type": "text", + "content": "GUI Grounding Models. GUI grounding models directly translate an instruction to an action on a webpage. There are" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 315, + 605, + 559, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 605, + 559, + 694 + ], + "spans": [ + { + "bbox": [ + 315, + 605, + 559, + 694 + ], + "type": "text", + "content": "two general types of grounding models. First, Set-of-Mark (SoM) (Yang et al. 
2023) segments salient elements on a webpage using an off-the-shelf segmentation model (e.g., SAM (Kirillov et al. 2023) and Semantic-SAM (Li et al. 2023)) and prompts a VLM to select a segment mask to identify the clicking area corresponding to the given instruction. Second, VLMs finetuned on datasets with paired instructions and annotated clicking coordinates or bounding" + } + ] + } + ], + "index": 72 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 54, + 293, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 54, + 293, + 99 + ], + "spans": [ + { + "bbox": [ + 51, + 54, + 293, + 99 + ], + "type": "text", + "content": "boxes. We evaluated UGround-V1 (Gou et al. 2024), OSAtlas (Wu et al. 2024b), Aria-UI (Yang et al. 2024), GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2024a), and UI-TARS (Qin et al. 2025)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 99, + 293, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 99, + 293, + 393 + ], + "spans": [ + { + "bbox": [ + 51, + 99, + 293, + 393 + ], + "type": "text", + "content": "VLM/LRM + Grounding. Grounding models are designed or trained to ground a simple instruction to a webpage and thus tend to lack reasoning or planning capabilities. To address this, we leveraged VLMs and LRMs to first translate real user instructions to more understandable ones for grounding models. In particular, a VLM or an LRM needs to reason about the true user intent implied by the instruction and the spatial & temporal context. For instructions that require multiple actions, it needs to generate a plan to complete the instructions. Finally, it needs to generate a straightforward, clear instruction for the grounding model to produce the final action at each step. We evaluated state-of-the-art VLMs (OpenAI 2023; Team 2025; Qwen et al. 
2025), as well as state-of-the-art LRMs (Jaech et al. 2024; Team 2025; Anthropic 2025). In the main results, we paired each VLM and LRM with the grounding model that achieved the highest step accuracy (GTA-1). For all VLMs and LRMs, we provide the past 10 steps for context, which we found to be a reasonable fixed context length in our preliminary study, balancing cost and informativeness. We also found that prompting models with screenshots of past webpages could incur a high cost. Therefore, we only prompt the models with the screenshot of the current webpage. For the history, we prompted GPT-4o to generate text-based action history based on consecutive screenshots and the instructions at each step. We then used this text-based history description for the evaluated VLMs and LRMs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 395, + 293, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 395, + 293, + 526 + ], + "spans": [ + { + "bbox": [ + 50, + 395, + 293, + 526 + ], + "type": "text", + "content": "Finetuning. To evaluate whether models can learn to better follow real-world user instructions with additional training, we finetuned the best-performing grounding model (GTA-1) following the model's original group relative policy optimization (GRPO) training procedure (Yang et al. 2025) on 9 participants' data and tested it on the held-out participants' instructions. Specifically, we trained the grounding model to produce an action based on the past 10 steps of actions (in text), the current webpage screenshot, and the instruction. We enumerated different train/test splits and reported the averaged performance, either using the finetuned model alone or pairing it with the best VLM or LRM." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 536, + 88, + 546 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 88, + 546 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 88, + 546 + ], + "type": "text", + "content": "Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "text", + "content": "Main results are summarized in Table 3. All models fell short in following real user instructions. The highest task success rate was only " + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "inline_equation", + "content": "14.0\\%" + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "text", + "content": " and the highest average progress was only " + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "inline_equation", + "content": "28.7\\%" + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "text", + "content": " a large gap compared to humans " + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "inline_equation", + "content": "(93.4\\%)" + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "text", + "content": " task success rate). This difference has a " + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "text", + "content": " confidence interval of [71.3, 87.5], and is highly significant with p-value " + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "inline_equation", + "content": "< 0.0001" + }, + { + "bbox": [ + 50, + 550, + 294, + 704 + ], + "type": "text", + "content": ". Grounding methods by themselves failed to finish most tasks. 
However, when paired with the best-performing grounding model (GTA-1), instructions generated by VLMs & LRMs significantly improved the performance. LRMs performed marginally better than most VLMs. Across all three metrics, Gemini 2.5 Flash, Gemini 2.5 Pro, and o3 showed the strongest performance. Finetuning GTA-1 on real user data marginally improved its perfor" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 316, + 55, + 558, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 55, + 558, + 99 + ], + "spans": [ + { + "bbox": [ + 316, + 55, + 558, + 99 + ], + "type": "text", + "content": "mance, but finetuning offered no benefit when GTA-1 was paired with VLMs and LRMs, since the finetuned model is trained to adapt to real users' instructions instead of instructions generated by VLM or LRM." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 409, + 109, + 466, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 409, + 109, + 466, + 120 + ], + "spans": [ + { + "bbox": [ + 409, + 109, + 466, + 120 + ], + "type": "text", + "content": "Discussion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 122, + 558, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 122, + 558, + 264 + ], + "spans": [ + { + "bbox": [ + 316, + 122, + 558, + 264 + ], + "type": "text", + "content": "Can grounding models understand real-world user instructions? There remains a significant gap in the performance of current direct grounding methods. The best grounding model, GUI-Actor, has a task success rate of only " + }, + { + "bbox": [ + 316, + 122, + 558, + 264 + ], + "type": "inline_equation", + "content": "5.7\\%" + }, + { + "bbox": [ + 316, + 122, + 558, + 264 + ], + "type": "text", + "content": ". Figure 5 illustrates various failure cases encountered when directly using GTA-1. 
Unsurprisingly, grounding models fail to interpret instructions requiring reasoning due to their limited reasoning capabilities. However, even for context-free instructions involving straightforward spatial reasoning—tasks where grounding methods should excel—they frequently misinterpret spatial layouts or rankings. For instance, they often incorrectly select elements for instructions such as \"click the first one.\"" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 265, + 558, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 265, + 558, + 396 + ], + "spans": [ + { + "bbox": [ + 315, + 265, + 558, + 396 + ], + "type": "text", + "content": "How can VLMs & LRMs help? VLMs or LRMs can convert the original user instructions into more direct and explicit descriptions that a grounding model can more easily understand. This is made possible by their reasoning capacities. For instance, in Figure 5A, the grounding model (GTA-1) on its own fails to select the first tab: it selects the first element instead of the first tab. However, it succeeds after o3 rewrites the instruction to refer to the title. As shown in Figure 5B, grounding models may sometimes still fail due to inherent limitations even when VLMs/LRMs generate clearer instructions. Nonetheless, incorporating VLMs or LRMs significantly improves overall performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "spans": [ + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "text", + "content": "What are the limitations of VLMs & LRMs? While VLMs and LRMs help, the highest task success rate is still only " + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "inline_equation", + "content": "14.0\\%" + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "text", + "content": ". 
Beyond errors from grounding models (e.g., Figure 5B), they continue to struggle with complex temporal reasoning. In Figure 5C, the user previously asked to open the first two search results in new tabs. When later instructed to \"look at the first one we just opened,\" o3 failed to identify which element \"the first one\" referred to—instead of the first newly opened tab, it pointed to the first search result. We further analyze the error distribution between reasoning errors (the VLM/LRM mistranslates the instruction and refers to the wrong element) and grounding errors (the rewritten instruction is correct, but the grounding model still fails to click the right element). For the best model " + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "inline_equation", + "content": "(\\mathrm{o}3 + \\mathrm{GTA} - 1)" + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "inline_equation", + "content": "43.3\\%" + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "text", + "content": " of errors are grounding errors and " + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "inline_equation", + "content": "56.7\\%" + }, + { + "bbox": [ + 315, + 397, + 559, + 594 + ], + "type": "text", + "content": " are reasoning errors. This suggests that current VLMs and LRMs still lack the reasoning and planning abilities needed to robustly perform sequential instruction-following tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 594, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 594, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 594, + 559, + 704 + ], + "type": "text", + "content": "Does learning from real-world user data help? Finetuning GTA-1 marginally improved average progress and step accuracy but yielded no additional benefit when paired with VLMs and LRMs. 
These results show that the finetuned model better understands real user instructions, yet it still fails to generalize to instructions generated by VLMs and LRMs. The experiments suggest that finetuning grounding models on a small set of real user instructions provides minimal benefit, and collecting large-scale real user instructions remains a significant challenge." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 90, + 51, + 520, + 289 + ], + "blocks": [ + { + "bbox": [ + 90, + 51, + 520, + 289 + ], + "lines": [ + { + "bbox": [ + 90, + 51, + 520, + 289 + ], + "spans": [ + { + "bbox": [ + 90, + 51, + 520, + 289 + ], + "type": "table", + "html": "
CategoryModelTask SuccessProgressStep Accuracy
HumanHuman Operator93.496.499.2
GroundingSet-of-Mark0.02.729.8
OS-Atlas0.03.826.6
Aria-UI0.02.432.8
UGround-V10.06.247.7
UI-TARS2.813.153.8
GTA-13.717.761.5
GUI-Actor5.714.761.4
VLM + GroundingGPT-4o + GTA-18.423.572.7
Qwen 2.5 72B + GTA-19.324.369.0
Gemini 2.5 Flash + GTA-111.226.975.4
LRM + Groundingo1 + GTA-17.517.768.2
Gemini 2.5 Pro + GTA-18.423.574.5
o4-mini + GTA-110.321.767.1
Claude 3.7 Sonnet + GTA-112.126.768.8
o3 + GTA-114.028.776.7
FinetunedGTA-1-F3.7 (+0.0)19.7 (+2.0)64.3 (+2.8)
Gemini 2.5 Flash + GTA-1-F11.2 (+0.0)26.9 (+0.0)75.4 (+0.0)
o3 + GTA-1-F14.0 (+0.0)28.7 (+0.0)76.7 (+0.0)
", + "image_path": "7e15864243a34993ec1a5ccc34c216e8c768944297d0dc2929d319e413d09841.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 297, + 560, + 332 + ], + "lines": [ + { + "bbox": [ + 50, + 297, + 560, + 332 + ], + "spans": [ + { + "bbox": [ + 50, + 297, + 560, + 332 + ], + "type": "text", + "content": "Table 2: Model Performance including task success rate, average progress, and step accuracy. All results are in %. The best performance of pretrained models and finetuned models is highlighted in bold. GTA-1-F indicates the finetuned GTA-1. Plus sign indicates the improvement compared to using the raw model for the same set of instructions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 55, + 358, + 198, + 540 + ], + "blocks": [ + { + "bbox": [ + 55, + 358, + 198, + 540 + ], + "lines": [ + { + "bbox": [ + 55, + 358, + 198, + 540 + ], + "spans": [ + { + "bbox": [ + 55, + 358, + 198, + 540 + ], + "type": "image", + "image_path": "7efbfd1a507fb1217230f8a285f7a1064e7d85faba0ce51e3b238de71309c6dd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 545, + 558, + 570 + ], + "lines": [ + { + "bbox": [ + 50, + 545, + 558, + 570 + ], + "spans": [ + { + "bbox": [ + 50, + 545, + 558, + 570 + ], + "type": "text", + "content": "Figure 5: Qualitative results. The captions show instructions generated by o3 (the best LRM). (A) Error corrected by using o3 to convert instructions. (B) Failure caused by GTA-1 when o3 reasons correctly. (C) Reasoning failure caused by o3." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 200, + 358, + 334, + 539 + ], + "blocks": [ + { + "bbox": [ + 200, + 358, + 334, + 539 + ], + "lines": [ + { + "bbox": [ + 200, + 358, + 334, + 539 + ], + "spans": [ + { + "bbox": [ + 200, + 358, + 334, + 539 + ], + "type": "image", + "image_path": "1560028b005585f00b7331b8ab6dda9bf201b15c7b0fadbaf391295da2699617.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 334, + 358, + 545, + 539 + ], + "blocks": [ + { + "bbox": [ + 334, + 358, + 545, + 539 + ], + "lines": [ + { + "bbox": [ + 334, + 358, + 545, + 539 + ], + "spans": [ + { + "bbox": [ + 334, + 358, + 545, + 539 + ], + "type": "image", + "image_path": "4d4d39ce25fbccf3bc1671ae26309bdc0979314bc11fc2de8468d2c3799b9ef5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 590, + 294, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 590, + 294, + 701 + ], + "spans": [ + { + "bbox": [ + 50, + 590, + 294, + 701 + ], + "type": "text", + "content": "Limitations. RealWebAssist represents an important first step towards evaluating web agents on long-horizon, real-user tasks. However, it has several limitations. The first is participant scale and diversity. Collecting real-user data is expensive and time-consuming. The number of participants is comparable to prior works that use expert annotators (Lu, Kasner, and Reddy 2024). However, we intend to increase user diversity in future versions of the benchmark. We will also open-source our data collection tools for community expansion of the dataset. 
Second, like prior benchmarks on" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 590, + 559, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 590, + 559, + 689 + ], + "spans": [ + { + "bbox": [ + 315, + 590, + 559, + 689 + ], + "type": "text", + "content": "real-world websites (Deng et al. 2024; Cheng et al. 2024), we constrain our evaluation to an offline setting to ensure reproducibility and safety. This is complementary to benchmarks that focus on interactive evaluation in sandbox environments (e.g., WebArena). We believe that web agents should be evaluated on both types of benchmarks to fully assess their capabilities. Lastly, the current setting does not allow dialogue between a user and the AI assistant, which we will explore in future work." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 142, + 53, + 202, + 65 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 53, + 202, + 65 + ], + "spans": [ + { + "bbox": [ + 142, + 53, + 202, + 65 + ], + "type": "text", + "content": "Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 68, + 294, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 68, + 294, + 247 + ], + "spans": [ + { + "bbox": [ + 50, + 68, + 294, + 247 + ], + "type": "text", + "content": "In this paper, we present RealWebAssist, the first benchmark for evaluating web agents' ability to provide long-horizon web assistance with real-world users via sequential instruction-following. Our benchmark poses novel challenges, including spatial and temporal reasoning, planning, and adapting to user-specific routines. We conducted a comprehensive evaluation and analysis on multiple state-of-the-art GUI grounding models, VLMs, and LRMs, revealing critical limitations of them. 
We have also shown the limited benefit of finetuning models on real user data. Our benchmark, along with the well-annotated user instruction dataset, provides resources and diagnostic tools for further research on real-world web assistance. In future work, we plan to expand our human study to include more participants from various backgrounds, examine web assistance in interactive settings, and incorporate chat between users and web agents." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 255, + 223, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 255, + 223, + 268 + ], + "spans": [ + { + "bbox": [ + 121, + 255, + 223, + 268 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 270, + 293, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 270, + 293, + 293 + ], + "spans": [ + { + "bbox": [ + 51, + 270, + 293, + 293 + ], + "type": "text", + "content": "This work was supported by a research grant from Amazon. We thank Janice Chen for helpful discussions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 304, + 201, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 304, + 201, + 315 + ], + "spans": [ + { + "bbox": [ + 143, + 304, + 201, + 315 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 319, + 293, + 704 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 52, + 319, + 293, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 319, + 293, + 351 + ], + "spans": [ + { + "bbox": [ + 52, + 319, + 293, + 351 + ], + "type": "text", + "content": "Anthropic. 2025. Claude 3.7 Sonnet and Claude Code. https://www.anthropic.com/news/claudi-3-7-sonnet. Accessed: 2025-03-17." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 354, + 293, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 354, + 293, + 376 + ], + "spans": [ + { + "bbox": [ + 52, + 354, + 293, + 376 + ], + "type": "text", + "content": "Cai, T.; Wang, X.; Ma, T.; Chen, X.; and Zhou, D. 2024. Large Language Models as Tool Makers. arXiv:2305.17126." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 379, + 293, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 379, + 293, + 412 + ], + "spans": [ + { + "bbox": [ + 52, + 379, + 293, + 412 + ], + "type": "text", + "content": "Cheng, K.; Sun, Q.; Chu, Y.; Xu, F.; Li, Y.; Zhang, J.; and Wu, Z. 2024. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 415, + 293, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 415, + 293, + 458 + ], + "spans": [ + { + "bbox": [ + 52, + 415, + 293, + 458 + ], + "type": "text", + "content": "Deng, X.; Gu, Y.; Zheng, B.; Chen, S.; Stevens, S.; Wang, B.; Sun, H.; and Su, Y. 2024. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 460, + 293, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 460, + 293, + 494 + ], + "spans": [ + { + "bbox": [ + 52, + 460, + 293, + 494 + ], + "type": "text", + "content": "Fried, D.; Tomlin, N.; Hu, J.; Patel, R.; and Nematzadeh, A. 2023. Pragmatics in Language Grounding: Phenomena, Tasks, and Modeling Approaches. arXiv:2211.08371." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 496, + 293, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 496, + 293, + 529 + ], + "spans": [ + { + "bbox": [ + 52, + 496, + 293, + 529 + ], + "type": "text", + "content": "Goodman, N. D.; and Frank, M. C. 2016. Pragmatic language interpretation as probabilistic inference. Trends in cognitive sciences, 20(11): 818-829." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 531, + 293, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 531, + 293, + 576 + ], + "spans": [ + { + "bbox": [ + 52, + 531, + 293, + 576 + ], + "type": "text", + "content": "Gou, B.; Wang, R.; Zheng, B.; Xie, Y.; Chang, C.; Shu, Y.; Sun, H.; and Su, Y. 2024. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 578, + 293, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 578, + 293, + 621 + ], + "spans": [ + { + "bbox": [ + 52, + 578, + 293, + 621 + ], + "type": "text", + "content": "Gur, I.; Furuta, H.; Huang, A.; Safdari, M.; Matsuo, Y.; Eck, D.; and Faust, A. 2024. A Real-World WebAgent with Planning, Long Context Understanding, and Program Synthesis. arXiv:2307.12856." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 624, + 293, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 624, + 293, + 658 + ], + "spans": [ + { + "bbox": [ + 52, + 624, + 293, + 658 + ], + "type": "text", + "content": "He, H.; Yao, W.; Ma, K.; Yu, W.; Dai, Y.; Zhang, H.; Lan, Z.; and Yu, D. 2024. WebVoyager: Building an End-to-End Web Agent with Large Multimodal Models. arXiv:2401.13919." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 660, + 293, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 660, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 52, + 660, + 293, + 704 + ], + "type": "text", + "content": "Hong, W.; Wang, W.; Lv, Q.; Xu, J.; Yu, W.; Ji, J.; Wang, Y.; Wang, Z.; Zhang, Y.; Li, J.; Xu, B.; Dong, Y.; Ding, M.; and Tang, J. 2024. CogAgent: A Visual Language Model for GUI Agents. arXiv:2312.08914." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 55, + 559, + 704 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 317, + 55, + 558, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 55, + 558, + 110 + ], + "spans": [ + { + "bbox": [ + 317, + 55, + 558, + 110 + ], + "type": "text", + "content": "Humphreys, P. C.; Raposo, D.; Pohlen, T.; Thornton, G.; Chhaparia, R.; Muldal, A.; Abramson, J.; Georgiev, P.; Santoro, A.; and Lillicrap, T. 2022. A data-driven approach for learning to control computers. In International Conference on Machine Learning, 9466-9482. PMLR." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 112, + 559, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 112, + 559, + 156 + ], + "spans": [ + { + "bbox": [ + 317, + 112, + 559, + 156 + ], + "type": "text", + "content": "Jaech, A.; Kalai, A.; Lerer, A.; Richardson, A.; El-Kishky, A.; Low, A.; Helyar, A.; Madry, A.; Beutel, A.; Carney, A.; et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 159, + 558, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 159, + 558, + 203 + ], + "spans": [ + { + "bbox": [ + 317, + 159, + 558, + 203 + ], + "type": "text", + "content": "Jang, L.; Li, Y.; Zhao, D.; Ding, C.; Lin, J.; Liang, P. P.; Bonatti, R.; and Koishida, K. 2024. 
Videowebarena: Evaluating long context multimodal agents with video understanding web tasks. arXiv preprint arXiv:2410.19100." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 205, + 559, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 205, + 559, + 249 + ], + "spans": [ + { + "bbox": [ + 317, + 205, + 559, + 249 + ], + "type": "text", + "content": "Kim, M.; Bursztyn, V.; Koh, E.; Guo, S.; and Hwang, S.-w. 2024. Rada: Retrieval-augmented web agent planning with llms. In Findings of the Association for Computational Linguistics ACL 2024, 13511-13525." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 251, + 559, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 251, + 559, + 295 + ], + "spans": [ + { + "bbox": [ + 317, + 251, + 559, + 295 + ], + "type": "text", + "content": "Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; Dollar, P.; and Girshick, R. 2023. Segment Anything. arXiv:2304.02643." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 297, + 559, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 297, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 297, + 559, + 342 + ], + "type": "text", + "content": "Li, F.; Zhang, H.; Sun, P.; Zou, X.; Liu, S.; Yang, J.; Li, C.; Zhang, L.; and Gao, J. 2023. Semantic-SAM: Segment and Recognize Anything at Any Granularity. arXiv preprint arXiv:2307.04767." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 344, + 558, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 344, + 558, + 378 + ], + "spans": [ + { + "bbox": [ + 317, + 344, + 558, + 378 + ], + "type": "text", + "content": "Liu, E. Z.; Guu, K.; Pasupat, P.; Shi, T.; and Liang, P. 2018. Reinforcement learning on web interfaces using workflow-guided exploration. arXiv preprint arXiv:1802.08802." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 380, + 559, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 380, + 559, + 423 + ], + "spans": [ + { + "bbox": [ + 317, + 380, + 559, + 423 + ], + "type": "text", + "content": "Liu, Z.; Yao, W.; Zhang, J.; Xue, L.; Heinecke, S.; Murthy, R.; Feng, Y.; Chen, Z.; Niebles, J. C.; Arpit, D.; et al. 2023. Bolaa: Benchmarking and orchestrating llm-augmented autonomous agents. arXiv preprint arXiv:2308.05960." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 426, + 559, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 426, + 559, + 459 + ], + "spans": [ + { + "bbox": [ + 317, + 426, + 559, + 459 + ], + "type": "text", + "content": "Lü, X. H.; Kasner, Z.; and Reddy, S. 2024. Weblinx: Realworld website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 460, + 559, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 460, + 559, + 505 + ], + "spans": [ + { + "bbox": [ + 317, + 460, + 559, + 505 + ], + "type": "text", + "content": "Majumder, B. P.; Mishra, B. D.; Jansen, P.; Tafjord, O.; Tandon, N.; Zhang, L.; Callison-Burch, C.; and Clark, P. 2023. CLIN: A Continually Learning Language Agent for Rapid Task Adaptation and Generalization. arXiv:2310.10134." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 507, + 559, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 507, + 559, + 540 + ], + "spans": [ + { + "bbox": [ + 317, + 507, + 559, + 540 + ], + "type": "text", + "content": "McCarthy, W. P.; Hawkins, R. D.; Wang, H.; Holdaway, C.; and Fan, J. E. 2021. Learning to communicate about shared procedural abstractions. arXiv preprint arXiv:2107.00077." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 543, + 559, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 543, + 559, + 608 + ], + "spans": [ + { + "bbox": [ + 317, + 543, + 559, + 608 + ], + "type": "text", + "content": "Nakano, R.; Hilton, J.; Balaji, S.; Wu, J.; Ouyang, L.; Kim, C.; Hesse, C.; Jain, S.; Kosaraju, V.; Saunders, W.; Jiang, X.; Cobbe, K.; Eloundou, T.; Krueger, G.; Button, K.; Knight, M.; Chess, B.; and Schulman, J. 2022. WebGPT: Browser-assisted question-answering with human feedback. arXiv:2112.09332." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 611, + 559, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 611, + 559, + 644 + ], + "spans": [ + { + "bbox": [ + 317, + 611, + 559, + 644 + ], + "type": "text", + "content": "Nogueira, R.; and Cho, K. 2016. End-to-end goal-driven web navigation. Advances in neural information processing systems, 29." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 647, + 559, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 647, + 559, + 667 + ], + "spans": [ + { + "bbox": [ + 317, + 647, + 559, + 667 + ], + "type": "text", + "content": "OpenAI. 2023. GPT-4 Technical Report. ArXiv, abs/2303.08774." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 670, + 559, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 670, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 317, + 670, + 559, + 704 + ], + "type": "text", + "content": "Pan, J.; Zhang, Y.; Tomlin, N.; Zhou, Y.; Levine, S.; and Suhr, A. 2024. Autonomous Evaluation and Refinement of Digital Agents. arXiv:2404.06474." 
+ } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 54, + 294, + 704 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 51, + 54, + 294, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 54, + 294, + 87 + ], + "spans": [ + { + "bbox": [ + 51, + 54, + 294, + 87 + ], + "type": "text", + "content": "Pang, R. Y.; Yuan, W.; Cho, K.; He, H.; Sukhbaatar, S.; and Weston, J. 2024. Iterative Reasoning Preference Optimization. arXiv:2404.19733." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 91, + 294, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 91, + 294, + 137 + ], + "spans": [ + { + "bbox": [ + 52, + 91, + 294, + 137 + ], + "type": "text", + "content": "Putta, P.; Mills, E.; Garg, N.; Motwani, S.; Finn, C.; Garg, D.; and Rafailov, R. 2024. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 139, + 294, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 139, + 294, + 185 + ], + "spans": [ + { + "bbox": [ + 51, + 139, + 294, + 185 + ], + "type": "text", + "content": "Qin, Y.; Ye, Y.; Fang, J.; Wang, H.; Liang, S.; Tian, S.; Zhang, J.; Li, J.; Li, Y.; Huang, S.; et al. 2025. UI-TARS: Pioneering Automated GUI Interaction with Native Agents. arXiv preprint arXiv:2501.12326." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 188, + 294, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 188, + 294, + 277 + ], + "spans": [ + { + "bbox": [ + 51, + 188, + 294, + 277 + ], + "type": "text", + "content": "Qwen;.; Yang, A.; Yang, B.; Zhang, B.; Hui, B.; Zheng, B.; Yu, B.; Li, C.; Liu, D.; Huang, F.; Wei, H.; Lin, H.; Yang, J.; Tu, J.; Zhang, J.; Yang, J.; Yang, J.; Zhou, J.; Lin, J.; Dang, K.; Lu, K.; Bao, K.; Yang, K.; Yu, L.; Li, M.; Xue, M.; Zhang, P.; Zhu, Q.; Men, R.; Lin, R.; Li, T.; Tang, T.; Xia, T.; Ren, X.; Ren, X.; Fan, Y.; Su, Y.; Zhang, Y.; Wan, Y.; Liu, Y.; Cui, Z.; Zhang, Z.; and Qiu, Z. 2025. Qwen2.5 Technical Report. arXiv:2412.15115." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 280, + 294, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 280, + 294, + 325 + ], + "spans": [ + { + "bbox": [ + 51, + 280, + 294, + 325 + ], + "type": "text", + "content": "Radford, A.; Kim, J. W.; Xu, T.; Brockman, G.; McLeavey, C.; and Sutskever, I. 2023. Robust speech recognition via large-scale weak supervision. In International conference on machine learning, 28492-28518. PMLR." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 328, + 294, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 328, + 294, + 373 + ], + "spans": [ + { + "bbox": [ + 51, + 328, + 294, + 373 + ], + "type": "text", + "content": "Reddy, C. K.; Beyrami, E.; Pool, J.; Cutler, R.; Srinivasan, S.; and Gehrke, J. 2019. A scalable noisy speech dataset and online subjective test framework. arXiv preprint arXiv:1909.08050." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 376, + 294, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 376, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 51, + 376, + 294, + 422 + ], + "type": "text", + "content": "Shi, T.; Karpathy, A.; Fan, L.; Hernandez, J.; and Liang, P. 
2017. World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, 3135-3144. PMLR." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 425, + 294, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 425, + 294, + 459 + ], + "spans": [ + { + "bbox": [ + 51, + 425, + 294, + 459 + ], + "type": "text", + "content": "Song, Y.; Thai, K.; Pham, C. M.; Chang, Y.; Nadaf, M.; and Iyyer, M. 2025. Bearcubs: A benchmark for computer-using web agents. arXiv preprint arXiv:2503.07919." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 462, + 294, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 462, + 294, + 496 + ], + "spans": [ + { + "bbox": [ + 51, + 462, + 294, + 496 + ], + "type": "text", + "content": "Team. 2025. Gemini 2.5: Pushing the Frontier with Advanced Reasoning, Multimodality, Long Context, and Next Generation Agentic Capabilities. arXiv:2507.06261." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 499, + 294, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 499, + 294, + 522 + ], + "spans": [ + { + "bbox": [ + 51, + 499, + 294, + 522 + ], + "type": "text", + "content": "Wang, Z. Z.; Mao, J.; Fried, D.; and Neubig, G. 2024. Agent workflow memory. arXiv preprint arXiv:2409.07429." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 525, + 294, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 525, + 294, + 570 + ], + "spans": [ + { + "bbox": [ + 51, + 525, + 294, + 570 + ], + "type": "text", + "content": "Wu, Q.; Cheng, K.; Yang, R.; Zhang, C.; Yang, J.; Jiang, H.; Mu, J.; Peng, B.; Qiao, B.; Tan, R.; et al. 2025. GUI-Actor: Coordinate-Free Visual Grounding for GUI Agents. arXiv preprint arXiv:2506.03143." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 574, + 294, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 574, + 294, + 608 + ], + "spans": [ + { + "bbox": [ + 51, + 574, + 294, + 608 + ], + "type": "text", + "content": "Wu, Z.; Han, C.; Ding, Z.; Weng, Z.; Liu, Z.; Yao, S.; Yu, T.; and Kong, L. 2024a. OS-Copilot: Towards Generalist Computer Agents with Self-Improvement. arXiv:2402.07456." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 611, + 294, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 611, + 294, + 656 + ], + "spans": [ + { + "bbox": [ + 51, + 611, + 294, + 656 + ], + "type": "text", + "content": "Wu, Z.; Wu, Z.; Xu, F.; Wang, Y.; Sun, Q.; Jia, C.; Cheng, K.; Ding, Z.; Chen, L.; Liang, P. P.; et al. 2024b. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "type": "text", + "content": "Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2024. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 54, + 559, + 603 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 317, + 54, + 559, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 54, + 559, + 99 + ], + "spans": [ + { + "bbox": [ + 317, + 54, + 559, + 99 + ], + "type": "text", + "content": "Xu, Y.; Wang, Z.; Wang, J.; Lu, D.; Xie, T.; Saha, A.; Sahoo, D.; Yu, T.; and Xiong, C. 2025. Aguvis: Unified Pure Vision Agents for Autonomous GUI Interaction. arXiv:2412.04454." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 100, + 559, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 100, + 559, + 134 + ], + "spans": [ + { + "bbox": [ + 317, + 100, + 559, + 134 + ], + "type": "text", + "content": "Yang, J.; Zhang, H.; Li, F.; Zou, X.; Li, C.; and Gao, J. 2023. Set-of-Mark Prompting Unleashes Extraordinary Visual Grounding in GPT-4V. arXiv preprint arXiv:2310.11441." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 137, + 559, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 137, + 559, + 170 + ], + "spans": [ + { + "bbox": [ + 317, + 137, + 559, + 170 + ], + "type": "text", + "content": "Yang, Y.; Li, D.; Dai, Y.; Yang, Y.; Luo, Z.; Zhao, Z.; Hu, Z.; Huang, J.; Saha, A.; Chen, Z.; et al. 2025. GTA1: GUI Test-time Scaling Agent. arXiv preprint arXiv:2507.05791." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 172, + 559, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 172, + 559, + 205 + ], + "spans": [ + { + "bbox": [ + 317, + 172, + 559, + 205 + ], + "type": "text", + "content": "Yang, Y.; Wang, Y.; Li, D.; Luo, Z.; Chen, B.; Huang, C.; and Li, J. 2024. Aria-UI: Visual Grounding for GUI Instructions. arXiv preprint arXiv:2412.16256." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 207, + 559, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 207, + 559, + 251 + ], + "spans": [ + { + "bbox": [ + 317, + 207, + 559, + 251 + ], + "type": "text", + "content": "Yao, S.; Chen, H.; Yang, J.; and Narasimhan, K. 2022. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35: 20744-20757." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 253, + 559, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 253, + 559, + 287 + ], + "spans": [ + { + "bbox": [ + 317, + 253, + 559, + 287 + ], + "type": "text", + "content": "Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2023. ReAct: Synergizing Reasoning and Acting in Language Models. arXiv:2210.03629." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 289, + 559, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 289, + 559, + 332 + ], + "spans": [ + { + "bbox": [ + 317, + 289, + 559, + 332 + ], + "type": "text", + "content": "Ying, L.; Liu, J. X.; Aanya, S.; Fang, Y.; Tellex, S.; Tenenbaum, J. B.; and Shu, T. 2024. SIFToM: Robust Spoken Instruction Following through Theory of Mind. arXiv:2409.10849." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 335, + 559, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 335, + 559, + 379 + ], + "spans": [ + { + "bbox": [ + 317, + 335, + 559, + 379 + ], + "type": "text", + "content": "Yuan, X.; Zhang, J.; Li, K.; Cai, Z.; Yao, L.; Chen, J.; Wang, E.; Hou, Q.; Chen, J.; Jiang, P.-T.; and Li, B. 2025. Enhancing Visual Grounding for GUI Agents via Self-Evolutionary Reinforcement Learning. arXiv:2505.12370." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 380, + 559, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 380, + 559, + 415 + ], + "spans": [ + { + "bbox": [ + 317, + 380, + 559, + 415 + ], + "type": "text", + "content": "Zhang, C.; Yang, Z.; Liu, J.; Han, Y.; Chen, X.; Huang, Z.; Fu, B.; and Yu, G. 2023. AppAgent: Multimodal Agents as Smartphone Users. arXiv:2312.13771." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 416, + 559, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 416, + 559, + 440 + ], + "spans": [ + { + "bbox": [ + 317, + 416, + 559, + 440 + ], + "type": "text", + "content": "Zhang, Z.; and Zhang, A. 2024. You Only Look at Screens: Multimodal Chain-of-Action Agents. arXiv:2309.11436." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 441, + 559, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 441, + 559, + 475 + ], + "spans": [ + { + "bbox": [ + 317, + 441, + 559, + 475 + ], + "type": "text", + "content": "Zheng, B.; Gou, B.; Kil, J.; Sun, H.; and Su, Y. 2024a. Gpt-4v (ision) is a generalist web agent, if grounded. arXiv preprint arXiv:2401.01614." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 476, + 559, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 476, + 559, + 510 + ], + "spans": [ + { + "bbox": [ + 317, + 476, + 559, + 510 + ], + "type": "text", + "content": "Zheng, L.; Wang, R.; Wang, X.; and An, B. 2024b. Synapse: Trajectory-as-Exemplar Prompting with Memory for Computer Control. arXiv:2306.07863." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 512, + 559, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 512, + 559, + 555 + ], + "spans": [ + { + "bbox": [ + 317, + 512, + 559, + 555 + ], + "type": "text", + "content": "Zhou, A.; Yan, K.; Shlapentokh-Rothman, M.; Wang, H.; and Wang, Y.-X. 2024. Language Agent Tree Search Unifies Reasoning Acting and Planning in Language Models. arXiv:2310.04406." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 558, + 559, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 558, + 559, + 603 + ], + "spans": [ + { + "bbox": [ + 317, + 558, + 559, + 603 + ], + "type": "text", + "content": "Zhou, S.; Xu, F. 
F.; Zhu, H.; Zhou, X.; Lo, R.; Sridhar, A.; Cheng, X.; Ou, T.; Bisk, Y.; Fried, D.; et al. 2023. Webarena: A realistic web environment for building autonomous agents. arXiv preprint arXiv:2307.13854." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 269, + 65, + 343, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 65, + 343, + 86 + ], + "spans": [ + { + "bbox": [ + 269, + 65, + 343, + 86 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 149, + 237, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 149, + 237, + 163 + ], + "spans": [ + { + "bbox": [ + 108, + 149, + 237, + 163 + ], + "type": "text", + "content": "More experiment results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 165, + 238, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 165, + 238, + 177 + ], + "spans": [ + { + "bbox": [ + 52, + 165, + 238, + 177 + ], + "type": "text", + "content": "Full VLM & LRM + Grounding results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 178, + 293, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 178, + 293, + 245 + ], + "spans": [ + { + "bbox": [ + 52, + 178, + 293, + 245 + ], + "type": "text", + "content": "For the best three grounding models, GTA-1 (Yang et al. 2025), GUI-Actor (Wu et al. 2025) and UI-TARS (Qin et al. 2025), we test their pairing with all the VLMs and LRMs. Table 3 shows the full results. All the evaluation experiments are run on a single A100 GPU for 20 - 40 minutes. Finetuning GTA-1 model takes 4 hours on 4 A100 GPUs." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 281, + 290, + 460 + ], + "blocks": [ + { + "bbox": [ + 52, + 254, + 248, + 266 + ], + "lines": [ + { + "bbox": [ + 52, + 254, + 248, + 266 + ], + "spans": [ + { + "bbox": [ + 52, + 254, + 248, + 266 + ], + "type": "text", + "content": "Experiment with different context lengths" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 281, + 290, + 460 + ], + "lines": [ + { + "bbox": [ + 53, + 281, + 290, + 460 + ], + "spans": [ + { + "bbox": [ + 53, + 281, + 290, + 460 + ], + "type": "image", + "image_path": "95dcf2badd6229a6cb610ae8fd4f5db862d76b97acd78d06b8096a64989af287.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 470, + 293, + 493 + ], + "lines": [ + { + "bbox": [ + 52, + 470, + 293, + 493 + ], + "spans": [ + { + "bbox": [ + 52, + 470, + 293, + 493 + ], + "type": "text", + "content": "Figure 6: Effect of context length on Gemini 2.5 Flash + GTA-1." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 505, + 293, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 505, + 293, + 637 + ], + "spans": [ + { + "bbox": [ + 53, + 505, + 293, + 637 + ], + "type": "text", + "content": "We evaluated the best-performing VLM (Gemini 2.5 Flash) + GTA-1 with varying history context lengths, from no history to 20 steps. An ideal assistant should be able to leverage different kinds of historical context based on different instructions, ranging from no history to multi-task history context (e.g., for routine learning). As shown in Figure 6, increasing context length also does not necessarily lead to better performance. Gemini 2.5 Flash + GTA-1 achieved the highest task success rate with a context length of 10, and increasing the context length further led to poorer performance. 
This suggest the limitation of VLM in effectively utilizing historical context for reasoning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 646, + 222, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 646, + 222, + 658 + ], + "spans": [ + { + "bbox": [ + 52, + 646, + 222, + 658 + ], + "type": "text", + "content": "Effect of Speech Recognition Errors" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 660, + 293, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 660, + 293, + 705 + ], + "spans": [ + { + "bbox": [ + 52, + 660, + 293, + 705 + ], + "type": "text", + "content": "All baseline experiments use the ground truth transcripts of user speech instructions as input to ensure that performance is not affected by errors in speech-to-text transcription. However, in real-world settings, instructions are of-" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "spans": [ + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "text", + "content": "ten given via speech. To reflect this, we evaluated the effect of speech recognition on the agent's performance by using the transcripts generated from a state-of-the-art automatic speech recognition (ASR) model, Whisper LargeV3 (Radford et al. 2023). Additionally, since users may not always be in quiet, controlled environments using a high-quality microphone like in our user experiment setup, we simulated noisy environments by injecting background noise with noise files from the Microsoft Scalable Noisy Speech Dataset (MS-SNSD) dataset (Reddy et al. 2019), following (Ying et al. 2024). The noise files include people talking in the background and keyboard typing sounds. 
As shown in Table 4, using speech recognition resulted in a " + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "inline_equation", + "content": "1.9\\%" + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "text", + "content": " drop in task success rate, and having noisy speech resulted in a further " + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "inline_equation", + "content": "1.9\\%" + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "text", + "content": " drop. In contrast, the word error rate (WER) of the ASR results increased from " + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "text", + "content": " (original speech) to " + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "inline_equation", + "content": "28.1\\%" + }, + { + "bbox": [ + 317, + 150, + 558, + 370 + ], + "type": "text", + "content": " (noisy speech), a much larger performance drop compared to the final task performance. This result suggests that reasoning the true meanings of speech instructions by leveraging context can help mitigate errors from ASR." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 364, + 383, + 512, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 383, + 512, + 396 + ], + "spans": [ + { + "bbox": [ + 364, + 383, + 512, + 396 + ], + "type": "text", + "content": "Dataset Construction Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 401, + 558, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 401, + 558, + 511 + ], + "spans": [ + { + "bbox": [ + 317, + 401, + 558, + 511 + ], + "type": "text", + "content": "Video Segmenting. As shown in the video example, the interactive sessions are highly dynamic, and spoken instructions do not always align cleanly with specific screens or timesteps. 
Automatically segmenting instructions and matching them to corresponding webpages and actions using heuristics would risk significantly degrading data quality. Therefore, we manually segment the live sessions using video editing software to construct the final RealWebAssist dataset. All participants provided consent to have their speech recorded and included in this dataset." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 512, + 558, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 512, + 558, + 600 + ], + "spans": [ + { + "bbox": [ + 317, + 512, + 558, + 600 + ], + "type": "text", + "content": "Bounding Box Labeling. As shown in Figure 7, certain instructions like \"close all the tabs\" may correspond to multiple valid actions, since closing any of the tabs first would be reasonable. Therefore, we add bounding boxes to all of the elements that would be correct. The bounding boxes are drawn manually using a Python tool built with tkinter, and the clickable regions are determined by a visual inspection of the webpage." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 383, + 613, + 492, + 626 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 613, + 492, + 626 + ], + "spans": [ + { + "bbox": [ + 383, + 613, + 492, + 626 + ], + "type": "text", + "content": "More Dataset Details" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 632, + 400, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 632, + 400, + 643 + ], + "spans": [ + { + "bbox": [ + 318, + 632, + 400, + 643 + ], + "type": "text", + "content": "Evaluation detail" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 649, + 559, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 649, + 559, + 705 + ], + "spans": [ + { + "bbox": [ + 317, + 649, + 559, + 705 + ], + "type": "text", + "content": "User instructions in RealWebAssist require different operations on the webpage, including clicking, scrolling and typing. We believe that action types other than clicking is trivial (for typing actions, the benchmark includes the step of finding the correct place to type instead of the actual typing" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 103, + 51, + 507, + 319 + ], + "blocks": [ + { + "bbox": [ + 103, + 51, + 507, + 319 + ], + "lines": [ + { + "bbox": [ + 103, + 51, + 507, + 319 + ], + "spans": [ + { + "bbox": [ + 103, + 51, + 507, + 319 + ], + "type": "table", + "html": "
VLM + GTA-1GPT-4o + GTA-18.423.572.7
Qwen 2.5 72B + GTA-19.324.369.0
Gemini 2.5 Flash + GTA-111.226.975.4
LRM + GTA-1Claude 3.7 Sonnet + GTA-112.126.768.8
Gemini 2.5 Pro + GTA-18.423.574.5
o1 + GTA-17.521.173.1
o3 + GTA-114.028.776.7
o4-mini + GTA-110.321.767.1
VLM + GUI-ACTORGPT-4o + GUI-Actor6.518.067.0
Qwen 2.5 72B + GUI-Actor9.321.464.9
Gemini 2.5 Flash + GUI-Actor10.325.673.1
LRM + GUI-ACTORClaude 3.7 Sonnet+ GUI-Actor7.518.563.9
Gemini 2.5 Pro + GUI-Actor9.324.073.2
o1 + GUI-Actor7.517.768.2
o3 + GUI-Actor12.127.474.0
o4-mini + GUI-Actor8.420.065.1
VLM + UI-TARSGPT-4o + UI-TARS6.520.867.3
Qwen 2.5 72B + UI-TARS7.521.863.2
Gemini 2.5 Flash + UI-TARS9.324.170.2
LRM + UI-TARSClaude 3.7 Sonnet + UI-TARS9.317.561.5
Gemini 2.5 Pro + UI-TARS7.523.471.6
o1 + UI-TARS6.518.566.0
o3 + UI-TARS12.127.272.4
o4-mini + UI-TARS7.519.462.5
", + "image_path": "e28c5b91165be0534e0d20d81aba0b282b8600cebcd81bf68b8bff288a0a0eca.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 359, + 292, + 449 + ], + "blocks": [ + { + "bbox": [ + 50, + 327, + 558, + 352 + ], + "lines": [ + { + "bbox": [ + 50, + 327, + 558, + 352 + ], + "spans": [ + { + "bbox": [ + 50, + 327, + 558, + 352 + ], + "type": "text", + "content": "Table 3: Model Performance for pairing GTA-1, GUI-Actor and UI-TARS with all LRMs & VLMs, including task success rate, average progress, and step accuracy. All results are in %." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 359, + 292, + 449 + ], + "lines": [ + { + "bbox": [ + 52, + 359, + 292, + 449 + ], + "spans": [ + { + "bbox": [ + 52, + 359, + 292, + 449 + ], + "type": "table", + "html": "
Input TranscriptTask SuccessProgressStep Accuracy
Ground Truth10.321.766.4
Whisper Large-V38.420.965.5
Whisper Large-V3 (Noise)6.520.663.4
", + "image_path": "c0dcda412e8aea234339339191505fc7c36ae8cf5fbe152af1a3683d49902da2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 456, + 294, + 504 + ], + "lines": [ + { + "bbox": [ + 50, + 456, + 294, + 504 + ], + "spans": [ + { + "bbox": [ + 50, + 456, + 294, + 504 + ], + "type": "text", + "content": "Table 4: Performance of GPT-4o + UGround-V1 using (1) ground-truth transcripts, (2) transcripts generated from original user speech by Whisper Large-V3, and (3) transcripts generated from noisy speech by Whisper Large-V3." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 50, + 523, + 294, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 523, + 294, + 592 + ], + "spans": [ + { + "bbox": [ + 50, + 523, + 294, + 592 + ], + "type": "text", + "content": "process), so we only evaluate click-type actions with annotated bounding boxes are scored; instructions like \"scroll\" remain in the history but are not counted in our metrics. Of the 1,885 instructions, 1,412 are scored, yielding 1,714 evaluated action steps (one screenshot per step). Tasks average 17.6 evaluated steps." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 600, + 125, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 600, + 125, + 611 + ], + "spans": [ + { + "bbox": [ + 51, + 600, + 125, + 611 + ], + "type": "text", + "content": "User behaviors" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 615, + 293, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 615, + 293, + 658 + ], + "spans": [ + { + "bbox": [ + 50, + 615, + 293, + 658 + ], + "type": "text", + "content": "Figure 8 shows diverse user behaviors in RealWebAssist not present in previous benchmarks. We include a zip file of the live recordings (including audio) from which the examples are taken." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 659, + 293, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 659, + 293, + 693 + ], + "spans": [ + { + "bbox": [ + 50, + 659, + 293, + 693 + ], + "type": "text", + "content": "Information seeking As Figure 8A shows, the user is seeking information from different aspects, like images and ratings, before they make the purchase decision." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 693, + 293, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 293, + 704 + ], + "type": "text", + "content": "Comparing different options Figure 8B shows the process" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 361, + 558, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 361, + 558, + 383 + ], + "spans": [ + { + "bbox": [ + 315, + 361, + 558, + 383 + ], + "type": "text", + "content": "of the user viewing two candidates and finally make the decision between them." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 384, + 558, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 384, + 558, + 427 + ], + "spans": [ + { + "bbox": [ + 315, + 384, + 558, + 427 + ], + "type": "text", + "content": "Changing minds In Figure 8C, the user is searching for some immersive dining experience. They are checking different restaurants and frequently change their minds when they see more options." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 427, + 558, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 427, + 558, + 472 + ], + "spans": [ + { + "bbox": [ + 315, + 427, + 558, + 472 + ], + "type": "text", + "content": "Trial-and-error As Figure 8D shows, the user has several unsuccessful attempts when searching for men's fashion week. 
They refer to previous searches or initiate new ones to look for what they want." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 472, + 558, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 472, + 558, + 528 + ], + "spans": [ + { + "bbox": [ + 315, + 472, + 558, + 528 + ], + "type": "text", + "content": "These diverse behaviors increase the complexity of the web assistance: instead of clearly defined-goals, the user themselves are also actively collecting knowledge to make decisions, which requires web assistant to follow the user's mind and act accordingly." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 71, + 549, + 197 + ], + "blocks": [ + { + "bbox": [ + 111, + 60, + 224, + 70 + ], + "lines": [ + { + "bbox": [ + 111, + 60, + 224, + 70 + ], + "spans": [ + { + "bbox": [ + 111, + 60, + 224, + 70 + ], + "type": "text", + "content": "\"Close all the tabs\"" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 71, + 549, + 197 + ], + "lines": [ + { + "bbox": [ + 53, + 71, + 549, + 197 + ], + "spans": [ + { + "bbox": [ + 53, + 71, + 549, + 197 + ], + "type": "image", + "image_path": "85af84f589e6a25c2962527918386f0b16a85e37584505f52029ff88a2d1a816.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 262, + 212, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 262, + 212, + 278 + ], + "spans": [ + { + "bbox": [ + 58, + 262, + 212, + 278 + ], + "type": "text", + "content": "A Information seeking" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 281, + 561, + 361 + ], + "blocks": [ + { + "bbox": [ + 50, + 213, + 558, + 248 + ], + "lines": [ + { + "bbox": [ + 50, + 213, + 558, + 248 + ], + "spans": [ + { + 
"bbox": [ + 50, + 213, + 558, + 248 + ], + "type": "text", + "content": "Figure 7: Example of annotated bounding boxes for an instruction. The red boxes represent the correct bounding boxes. The user gave the instruction \"Close all the tabs\". For evaluation purposes, closing any of the tabs first is considered correct at each step, so all the x marks are labeled as correct at each step." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 58, + 281, + 561, + 361 + ], + "lines": [ + { + "bbox": [ + 58, + 281, + 561, + 361 + ], + "spans": [ + { + "bbox": [ + 58, + 281, + 561, + 361 + ], + "type": "image", + "image_path": "461528e1a0f26a59ab974cdb764d3878636aa1d9dbf8cf802bb648c7768dafcf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 57, + 369, + 270, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 369, + 270, + 385 + ], + "spans": [ + { + "bbox": [ + 57, + 369, + 270, + 385 + ], + "type": "text", + "content": "B Comparing different options" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 57, + 387, + 561, + 475 + ], + "blocks": [ + { + "bbox": [ + 57, + 387, + 561, + 475 + ], + "lines": [ + { + "bbox": [ + 57, + 387, + 561, + 475 + ], + "spans": [ + { + "bbox": [ + 57, + 387, + 561, + 475 + ], + "type": "image", + "image_path": "d437676042ad38ac634c0d36d327cc10e98ca3bb9a29f7c189a150c8f1375f3c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 487, + 184, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 487, + 184, + 504 + ], + "spans": [ + { + "bbox": [ + 57, + 487, + 184, + 504 + ], + "type": "text", + "content": "C Changing minds" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 58, + 508, + 561, + 582 + ], + "blocks": [ + { + "bbox": [ + 58, + 508, + 561, + 582 + ], + 
"lines": [ + { + "bbox": [ + 58, + 508, + 561, + 582 + ], + "spans": [ + { + "bbox": [ + 58, + 508, + 561, + 582 + ], + "type": "image", + "image_path": "2804f0236f67ed074ccb568daf1db5e3988534a2f55bf4a59dc12969c3573f75.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 58, + 590, + 177, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 590, + 177, + 605 + ], + "spans": [ + { + "bbox": [ + 58, + 590, + 177, + 605 + ], + "type": "text", + "content": "D Trial-and-error" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 58, + 609, + 547, + 682 + ], + "blocks": [ + { + "bbox": [ + 58, + 609, + 547, + 682 + ], + "lines": [ + { + "bbox": [ + 58, + 609, + 547, + 682 + ], + "spans": [ + { + "bbox": [ + 58, + 609, + 547, + 682 + ], + "type": "image", + "image_path": "8bfb0f9626c848519a4275d943677e17f7c84cbd0ee00c672e1e4d2e7106f0da.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 183, + 697, + 425, + 710 + ], + "lines": [ + { + "bbox": [ + 183, + 697, + 425, + 710 + ], + "spans": [ + { + "bbox": [ + 183, + 697, + 425, + 710 + ], + "type": "text", + "content": "Figure 8: Example of rich user behaviors in RealWebAssist." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 100, + 80, + 191, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 80, + 191, + 92 + ], + "spans": [ + { + "bbox": [ + 100, + 80, + 191, + 92 + ], + "type": "text", + "content": "Task # Description" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 120, + 96, + 510, + 699 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 121, + 96, + 386, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 96, + 386, + 108 + ], + "spans": [ + { + "bbox": [ + 121, + 96, + 386, + 108 + ], + "type": "text", + "content": " 1 Buy a gift for each of my three friends with a budget of $100" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 122, + 108, + 449, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 108, + 449, + 119 + ], + "spans": [ + { + "bbox": [ + 122, + 108, + 449, + 119 + ], + "type": "text", + "content": "2 Find and buy a birthday gift for a friend who loves tech, within a $50 budget." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 123, + 119, + 367, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 119, + 367, + 129 + ], + "spans": [ + { + "bbox": [ + 123, + 119, + 367, + 129 + ], + "type": "text", + "content": "3 Purchase a cute water bottle for everyday use, under $15" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 123, + 130, + 380, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 130, + 380, + 140 + ], + "spans": [ + { + "bbox": [ + 123, + 130, + 380, + 140 + ], + "type": "text", + "content": "4 Compare different laptops and buy one with the best review" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 123, + 141, + 494, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 141, + 494, + 152 + ], + "spans": [ + { + "bbox": [ + 123, + 141, + 494, + 152 + ], + "type": "text", + "content": "5 Purchase three home workout items under \\(75 and compare their reviews before buying." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 123, + 152, + 510, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 152, + 510, + 173 + ], + "spans": [ + { + "bbox": [ + 123, + 152, + 510, + 173 + ], + "type": "text", + "content": " 6 Find and order a customized gift (e.g., engraved or personalized) for a friend's graduation under $60." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 174, + 493, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 174, + 493, + 185 + ], + "spans": [ + { + "bbox": [ + 123, + 174, + 493, + 185 + ], + "type": "text", + "content": " 7 Order a complete warm and durable winter outfit (jacket, gloves, and boots) under $200." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 123, + 185, + 510, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 185, + 510, + 205 + ], + "spans": [ + { + "bbox": [ + 123, + 185, + 510, + 205 + ], + "type": "text", + "content": "8 Get two sets of reusable grocery bags under \\(20 total, checking for durability and eco-friendliness." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 206, + 510, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 206, + 510, + 218 + ], + "spans": [ + { + "bbox": [ + 121, + 206, + 510, + 218 + ], + "type": "text", + "content": "9 Buy two wall paintings for a family house, one for a 13-year old boy, one for a 6-year old girl" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 218, + 406, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 218, + 406, + 228 + ], + "spans": [ + { + "bbox": [ + 120, + 218, + 406, + 228 + ], + "type": "text", + "content": "10 Purchase a set of colorful coffee mugs under $20 with fun designs" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 120, + 229, + 482, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 229, + 482, + 240 + ], + "spans": [ + { + "bbox": [ + 120, + 229, + 482, + 240 + ], + "type": "text", + "content": "11 Buy a small easy-care indoor plant under \\(15 and schedule delivery within three days" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 120, + 240, + 468, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 240, + 468, + 251 + ], + "spans": [ + { + "bbox": [ + 120, + 240, + 468, + 251 + ], + "type": "text", + "content": "12 Get a colorful umbrella for under \\(30, making sure it's big enough for two people" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 120, + 251, + 510, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 251, + 510, + 272 + ], + "spans": [ + { + 
"bbox": [ + 120, + 251, + 510, + 272 + ], + "type": "text", + "content": "13 Buy a set of scented candles under $25, ensuring they have good reviews for long-lasting fragrance." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 120, + 272, + 451, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 272, + 451, + 283 + ], + "spans": [ + { + "bbox": [ + 120, + 272, + 451, + 283 + ], + "type": "text", + "content": "14 Find and purchase a durable phone case under $20 for an iPhone 14 Pro Max." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 120, + 284, + 434, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 284, + 434, + 294 + ], + "spans": [ + { + "bbox": [ + 120, + 284, + 434, + 294 + ], + "type": "text", + "content": "15 Order a cozy throw blanket under \\(30, checking for softness and warmth." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 120, + 295, + 400, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 295, + 400, + 305 + ], + "spans": [ + { + "bbox": [ + 120, + 295, + 400, + 305 + ], + "type": "text", + "content": "16 Buy a set of three face masks (reusable & breathable) under $15." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 120, + 306, + 455, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 306, + 455, + 316 + ], + "spans": [ + { + "bbox": [ + 120, + 306, + 455, + 316 + ], + "type": "text", + "content": "17 Get a wireless Bluetooth speaker under \\(40 with good bass and waterproofing." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 120, + 317, + 499, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 317, + 499, + 327 + ], + "spans": [ + { + "bbox": [ + 120, + 317, + 499, + 327 + ], + "type": "text", + "content": "18 Order a set of noise-canceling earplugs under $15, ensuring they're comfortable for sleep." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 120, + 327, + 405, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 327, + 405, + 338 + ], + "spans": [ + { + "bbox": [ + 120, + 327, + 405, + 338 + ], + "type": "text", + "content": "19 Find and buy a compact travel pillow and eye mask set under $30." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 120, + 338, + 415, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 338, + 415, + 350 + ], + "spans": [ + { + "bbox": [ + 120, + 338, + 415, + 350 + ], + "type": "text", + "content": "20 Purchase a set of six kitchen towels under \\(20 with high absorbency." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 350, + 433, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 350, + 433, + 360 + ], + "spans": [ + { + "bbox": [ + 120, + 350, + 433, + 360 + ], + "type": "text", + "content": "21 Buy an adjustable desk lamp under \\(35 with multiple brightness settings." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 120, + 360, + 452, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 360, + 452, + 371 + ], + "spans": [ + { + "bbox": [ + 120, + 360, + 452, + 371 + ], + "type": "text", + "content": "22 Order a pack of 12 gel pens under \\(15 in assorted colors with smooth writing." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 120, + 372, + 476, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 372, + 476, + 382 + ], + "spans": [ + { + "bbox": [ + 120, + 372, + 476, + 382 + ], + "type": "text", + "content": "23 Purchase a waterproof picnic blanket under \\(40, ensuring it's easy to fold and carry." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 120, + 383, + 425, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 383, + 425, + 393 + ], + "spans": [ + { + "bbox": [ + 120, + 383, + 425, + 393 + ], + "type": "text", + "content": "24 Buy a cute yet professional notebook under \\(20 for journaling or work." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 120, + 393, + 510, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 393, + 510, + 404 + ], + "spans": [ + { + "bbox": [ + 120, + 393, + 510, + 404 + ], + "type": "text", + "content": "25 Find and purchase a comfortable memory foam seat cushion under \\(35 for long sitting hours." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 120, + 404, + 383, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 404, + 383, + 415 + ], + "spans": [ + { + "bbox": [ + 120, + 404, + 383, + 415 + ], + "type": "text", + "content": "26 Order a set of reusable silicone food storage bags under $25." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 120, + 415, + 506, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 415, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 120, + 415, + 506, + 426 + ], + "type": "text", + "content": "27 Buy a pair of comfy indoor slippers under \\(30 with high reviews for warmth and durability." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 120, + 426, + 408, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 426, + 408, + 437 + ], + "spans": [ + { + "bbox": [ + 120, + 426, + 408, + 437 + ], + "type": "text", + "content": "28 Purchase a portable mini humidifier under \\(40 with USB charging." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 120, + 437, + 478, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 437, + 478, + 448 + ], + "spans": [ + { + "bbox": [ + 120, + 437, + 478, + 448 + ], + "type": "text", + "content": "29 Order a stylish travel makeup bag under \\(25, ensuring it has multiple compartments." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 120, + 448, + 453, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 448, + 453, + 459 + ], + "spans": [ + { + "bbox": [ + 120, + 448, + 453, + 459 + ], + "type": "text", + "content": "30 Find and order a surprise gift box for a friend who enjoys skincare, under $50." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 120, + 459, + 441, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 459, + 441, + 470 + ], + "spans": [ + { + "bbox": [ + 120, + 459, + 441, + 470 + ], + "type": "text", + "content": "31 Compare wireless earbuds and purchase the best-reviewed pair under $100." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 120, + 470, + 480, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 470, + 480, + 480 + ], + "spans": [ + { + "bbox": [ + 120, + 470, + 480, + 480 + ], + "type": "text", + "content": "32 Order a budget-friendly yet stylish smartwatch under " + }, + { + "bbox": [ + 120, + 470, + 480, + 480 + ], + "type": "inline_equation", + "content": "\\$ {75}" + }, + { + "bbox": [ + 120, + 470, + 480, + 480 + ], + "type": "text", + "content": " ,ensuring good battery life." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 120, + 481, + 510, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 481, + 510, + 502 + ], + "spans": [ + { + "bbox": [ + 120, + 481, + 510, + 502 + ], + "type": "text", + "content": "33 Find and order a high-quality mechanical keyboard under $120, comparing typing feel and reviews" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 120, + 502, + 460, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 502, + 460, + 514 + ], + "spans": [ + { + "bbox": [ + 120, + 502, + 460, + 514 + ], + "type": "text", + "content": "34 Find and buy a useful desk gadget under \\(40 for a friend who works from home" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 120, + 514, + 510, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 514, + 510, + 535 + ], + "spans": [ + { + "bbox": [ + 120, + 514, + 510, + 535 + ], + "type": "text", + "content": "35 Plan flights for a trip from US to Europe (at least two different countries) for 3 days, comparing different airlines to find the best deal." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 120, + 536, + 510, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 536, + 510, + 556 + ], + "spans": [ + { + "bbox": [ + 120, + 536, + 510, + 556 + ], + "type": "text", + "content": "36 Plan a 5-day trip to Japan, booking both flights and hotels, taking into account customer reviews." 
+ } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 120, + 557, + 510, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 557, + 510, + 579 + ], + "spans": [ + { + "bbox": [ + 120, + 557, + 510, + 579 + ], + "type": "text", + "content": "37 Book a hotel for a weekend trip for a good price near the beach within the country, making sure you can cancel the trip at any time" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 120, + 580, + 510, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 580, + 510, + 601 + ], + "spans": [ + { + "bbox": [ + 120, + 580, + 510, + 601 + ], + "type": "text", + "content": "38 Plan a spontaneous weekend trip to a destination with cheap last-minute flights and good hotel deals, for hotel make sure it's comfortable enough." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 120, + 601, + 510, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 601, + 510, + 622 + ], + "spans": [ + { + "bbox": [ + 120, + 601, + 510, + 622 + ], + "type": "text", + "content": "39 Book a luxury hotel for a weekend at a city in the west US, pay attention to different services offered" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 120, + 623, + 480, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 623, + 480, + 634 + ], + "spans": [ + { + "bbox": [ + 120, + 623, + 480, + 634 + ], + "type": "text", + "content": "40 Plan a three-stop European trip in a single week, with flights and hotel for each place" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 120, + 635, + 510, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 635, + 510, + 655 + ], + "spans": [ + { + "bbox": [ + 120, + 635, + 510, + 655 + ], + "type": "text", + "content": "41 Book hotel for a family tour of four to a kid-friendly destination, with a hotel offering family amenities and breakfast included." 
+ } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 120, + 656, + 510, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 656, + 510, + 677 + ], + "spans": [ + { + "bbox": [ + 120, + 656, + 510, + 677 + ], + "type": "text", + "content": "42 Arrange a road trip across the US, booking rental cars and a mix of motels and boutique hotels along the route." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 120, + 678, + 510, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 678, + 510, + 699 + ], + "spans": [ + { + "bbox": [ + 120, + 678, + 510, + 699 + ], + "type": "text", + "content": "43 Book a romantic beach getaway in Hawaii for two people, make sure it's close to beach and have sea view" + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 53, + 135, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 53, + 135, + 64 + ], + "spans": [ + { + "bbox": [ + 52, + 53, + 135, + 64 + ], + "type": "text", + "content": "Full List of Tasks" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 100, + 55, + 192, + 68 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 55, + 192, + 68 + ], + "spans": [ + { + "bbox": [ + 100, + 55, + 192, + 68 + ], + "type": "text", + "content": "Task # Description" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 72, + 511, + 697 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 117, + 72, + 511, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 72, + 511, + 94 + ], + "spans": [ + { + "bbox": [ + 117, + 72, + 511, + 94 + ], + "type": "text", + "content": "44 Plan a family Disney Cruise, securing flights to Port Canaveral and a hotel near the theme parks before sailing." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 94, + 510, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 94, + 510, + 116 + ], + "spans": [ + { + "bbox": [ + 117, + 94, + 510, + 116 + ], + "type": "text", + "content": "45 Arrange a wine country getaway, booking flights to Napa Valley, a rental car, and a vineyard hotel with wine-tasting experiences." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 117, + 116, + 510, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 116, + 510, + 138 + ], + "spans": [ + { + "bbox": [ + 117, + 116, + 510, + 138 + ], + "type": "text", + "content": "46 Find flights and a convertible rental car for a coastal drive in Hawaii, staying in beachfront resorts along the way." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 137, + 492, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 137, + 492, + 149 + ], + "spans": [ + { + "bbox": [ + 117, + 137, + 492, + 149 + ], + "type": "text", + "content": "47 Choose flights to a popular ski destination and secure a lodge or hotel under \\(150/night." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 149, + 510, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 149, + 510, + 171 + ], + "spans": [ + { + "bbox": [ + 117, + 149, + 510, + 171 + ], + "type": "text", + "content": "48 Book last-minute flights and a centrally located hotel in a major US city, focusing on deals under $100/night with great city landscape view." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 117, + 171, + 510, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 171, + 510, + 192 + ], + "spans": [ + { + "bbox": [ + 117, + 171, + 510, + 192 + ], + "type": "text", + "content": "49 Secure round-trip flights to a scenic South American city and book a comfortable hotel near local attractions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 117, + 192, + 510, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 192, + 510, + 214 + ], + "spans": [ + { + "bbox": [ + 117, + 192, + 510, + 214 + ], + "type": "text", + "content": "50 Pick flights from a major US airport to a warm city in Canada, with a hotel under $100/night in the downtown area." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 117, + 214, + 510, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 214, + 510, + 236 + ], + "spans": [ + { + "bbox": [ + 117, + 214, + 510, + 236 + ], + "type": "text", + "content": "51 Schedule flights and a boutique hotel stay in a city rich in history, aiming for under $100/night in a central location." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 117, + 236, + 510, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 236, + 510, + 258 + ], + "spans": [ + { + "bbox": [ + 117, + 236, + 510, + 258 + ], + "type": "text", + "content": "52 Arrange direct flights to a popular theme park region, booking a nearby hotel or hotel with easy transportation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 117, + 258, + 510, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 258, + 510, + 280 + ], + "spans": [ + { + "bbox": [ + 117, + 258, + 510, + 280 + ], + "type": "text", + "content": "53 Schedule flights for a quick visit to a popular national park, booking a nearby lodge or hotel with scenic views." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 117, + 280, + 510, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 280, + 510, + 302 + ], + "spans": [ + { + "bbox": [ + 117, + 280, + 510, + 302 + ], + "type": "text", + "content": "54 Book round-trip flights to a major Middle Eastern city and reserve a modern hotel near historic sites for under $100/night" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 117, + 302, + 492, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 302, + 492, + 314 + ], + "spans": [ + { + "bbox": [ + 117, + 302, + 492, + 314 + ], + "type": "text", + "content": "55 Secure flights from the US to a tropical island, choosing a resort that offers water sports" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 117, + 313, + 510, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 313, + 510, + 335 + ], + "spans": [ + { + "bbox": [ + 117, + 313, + 510, + 335 + ], + "type": "text", + "content": "56 Find flights and a resort for a tropical vacation in Cancun, Mexico, focusing on all-inclusive options for relaxation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 117, + 335, + 510, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 335, + 510, + 357 + ], + "spans": [ + { + "bbox": [ + 117, + 335, + 510, + 357 + ], + "type": "text", + "content": "57 Book flights to Cairo for a 5-day trip, then pick a hotel with a direct view of the Pyramids and free breakfast included" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 117, + 357, + 510, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 357, + 510, + 379 + ], + "spans": [ + { + "bbox": [ + 117, + 357, + 510, + 379 + ], + "type": "text", + "content": "58 Book a solo retreat to Kyoto, Japan, selecting a traditional ryokan stay with an onsen and authentic Japanese breakfast." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 117, + 379, + 411, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 379, + 411, + 390 + ], + "spans": [ + { + "bbox": [ + 117, + 379, + 411, + 390 + ], + "type": "text", + "content": "59 Buy tickets for 2 people to an NBA Basketball game next weekend." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 117, + 390, + 510, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 390, + 510, + 411 + ], + "spans": [ + { + "bbox": [ + 117, + 390, + 510, + 411 + ], + "type": "text", + "content": "60 Find and book tickets for a concert by a top artist in the nearest major city within the next three months." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 117, + 411, + 420, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 411, + 420, + 422 + ], + "spans": [ + { + "bbox": [ + 117, + 411, + 420, + 422 + ], + "type": "text", + "content": "61 Search for a last-minute concert ticket and find the best available seat." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 117, + 422, + 406, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 422, + 406, + 434 + ], + "spans": [ + { + "bbox": [ + 117, + 422, + 406, + 434 + ], + "type": "text", + "content": "62 Book 3 tickets for a rivalry match between two major sports teams" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 117, + 434, + 510, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 434, + 510, + 456 + ], + "spans": [ + { + "bbox": [ + 117, + 434, + 510, + 456 + ], + "type": "text", + "content": "63 Book 3 tickets for a unique or unusual event, such as a drag show, wrestling match, or haunted experience" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 117, + 456, + 510, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 456, + 510, + 477 + ], + "spans": [ + { + "bbox": [ + 117, + 456, + 510, + 477 + ], + "type": "text", + "content": "64 Purchase four tickets for a Broadway musical happening next month, aiming for orchestra seats if possible." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 117, + 477, + 367, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 477, + 367, + 489 + ], + "spans": [ + { + "bbox": [ + 117, + 477, + 367, + 489 + ], + "type": "text", + "content": "65 Buy tickets for a family of 4 with 2 kids to a MLB game" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 117, + 489, + 510, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 489, + 510, + 510 + ], + "spans": [ + { + "bbox": [ + 117, + 489, + 510, + 510 + ], + "type": "text", + "content": "66 Find and book tickets to a popular stand-up comedy show in a western big city for the upcoming weekend, prioritizing seats near the front." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 117, + 510, + 471, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 510, + 471, + 521 + ], + "spans": [ + { + "bbox": [ + 117, + 510, + 471, + 521 + ], + "type": "text", + "content": "67 Locate discounted tickets for a live theater performance in California this weekend" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 117, + 521, + 510, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 521, + 510, + 543 + ], + "spans": [ + { + "bbox": [ + 117, + 521, + 510, + 543 + ], + "type": "text", + "content": "Search for an NFL game next month and buy two tickets in a mid-priced seating section for some eastern teams" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 117, + 543, + 510, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 543, + 510, + 565 + ], + "spans": [ + { + "bbox": [ + 117, + 543, + 510, + 565 + ], + "type": "text", + "content": "69 Identify and reserve tickets for a children's matinee performance at a local venue, comparing any available family packages or group discounts." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 117, + 565, + 437, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 565, + 437, + 577 + ], + "spans": [ + { + "bbox": [ + 117, + 565, + 437, + 577 + ], + "type": "text", + "content": "70 Secure seats for a must-see hockey match, comparing \"Best Seat\" options." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 117, + 577, + 510, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 577, + 510, + 598 + ], + "spans": [ + { + "bbox": [ + 117, + 577, + 510, + 598 + ], + "type": "text", + "content": "71 Find tickets for a classical music or orchestra concert in the nearest major city next month, aiming for seats with a good view of the stage." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 117, + 598, + 510, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 598, + 510, + 620 + ], + "spans": [ + { + "bbox": [ + 117, + 598, + 510, + 620 + ], + "type": "text", + "content": "72 Buy tickets for two people to an English Premier League soccer match in London city center next weekend." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 117, + 620, + 510, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 620, + 510, + 641 + ], + "spans": [ + { + "bbox": [ + 117, + 620, + 510, + 641 + ], + "type": "text", + "content": "73 Find and purchase tickets to a major electronic music festival in Las Vegas within the next two months." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 117, + 641, + 510, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 641, + 510, + 664 + ], + "spans": [ + { + "bbox": [ + 117, + 641, + 510, + 664 + ], + "type": "text", + "content": "74 Book seats for a stand-up comedy show in downtown Chicago next month, make sure the location is in city center." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 117, + 664, + 510, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 664, + 510, + 687 + ], + "spans": [ + { + "bbox": [ + 117, + 664, + 510, + 687 + ], + "type": "text", + "content": "75 Search for tickets to a top-tier cricket match in Sydney next month, aiming for seats that offer a good view of the pitch" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 117, + 686, + 447, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 686, + 447, + 697 + ], + "spans": [ + { + "bbox": [ + 117, + 686, + 447, + 697 + ], + "type": "text", + "content": "76 Locate a family-friendly musical performance near your city for next month." 
+ } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 100, + 54, + 191, + 68 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 54, + 191, + 68 + ], + "spans": [ + { + "bbox": [ + 100, + 54, + 191, + 68 + ], + "type": "text", + "content": "Task # Description" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 113, + 72, + 511, + 521 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 117, + 72, + 511, + 93 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 72, + 511, + 93 + ], + "spans": [ + { + "bbox": [ + 117, + 72, + 511, + 93 + ], + "type": "text", + "content": "77 Purchase two tickets to an upcoming rugby match in Dublin next month, making sure seats are in a central section and remain under." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 94, + 511, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 94, + 511, + 115 + ], + "spans": [ + { + "bbox": [ + 117, + 94, + 511, + 115 + ], + "type": "text", + "content": "78 Find a highly rated ballet or opera production in Paris within the next two months, choose the seat in the second floor if available" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 117, + 116, + 498, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 116, + 498, + 127 + ], + "spans": [ + { + "bbox": [ + 117, + 116, + 498, + 127 + ], + "type": "text", + "content": "79 Find tickets to a major fashion event, such as a runway show or fashion week experience." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 127, + 510, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 127, + 510, + 148 + ], + "spans": [ + { + "bbox": [ + 117, + 127, + 510, + 148 + ], + "type": "text", + "content": "80 Look for tickets to a themed immersive dining experience (e.g., murder mystery dinner, fantasy-inspired restaurant)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 149, + 471, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 149, + 471, + 159 + ], + "spans": [ + { + "bbox": [ + 117, + 149, + 471, + 159 + ], + "type": "text", + "content": "81 Book tickets for UEFA soccer game between two Spanish teams for the next week" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 117, + 160, + 472, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 160, + 472, + 171 + ], + "spans": [ + { + "bbox": [ + 117, + 160, + 472, + 171 + ], + "type": "text", + "content": "82 Book a ticket for a rooftop movie screening or outdoor film festival in a major city." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 117, + 171, + 476, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 171, + 476, + 182 + ], + "spans": [ + { + "bbox": [ + 117, + 171, + 476, + 182 + ], + "type": "text", + "content": "83 Find tickets for an esports event and compare standard vs. premium seating options." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 117, + 182, + 392, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 182, + 392, + 193 + ], + "spans": [ + { + "bbox": [ + 117, + 182, + 392, + 193 + ], + "type": "text", + "content": "84 Book a ticket for a \"silent disco\" event in a city of your choice." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 117, + 193, + 511, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 193, + 511, + 214 + ], + "spans": [ + { + "bbox": [ + 117, + 193, + 511, + 214 + ], + "type": "text", + "content": "85 secure two tickets to a major MLB game in a well-known ballpark anywhere in the U.S. next month, opting for seats along the first baseline." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 117, + 214, + 510, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 214, + 510, + 236 + ], + "spans": [ + { + "bbox": [ + 117, + 214, + 510, + 236 + ], + "type": "text", + "content": "86 Find and book tickets for a large-scale country music festival occurring in the southern U.S. within the next two months, focusing on general admission passes." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 117, + 236, + 510, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 236, + 510, + 258 + ], + "spans": [ + { + "bbox": [ + 117, + 236, + 510, + 258 + ], + "type": "text", + "content": "87 Purchase seats for a top-tier college football rivalry game taking place within the next six weeks, ensuring you can view the marching band's performance easily." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 117, + 258, + 510, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 258, + 510, + 280 + ], + "spans": [ + { + "bbox": [ + 117, + 258, + 510, + 280 + ], + "type": "text", + "content": "88 Reserve tickets to a major NHL match in the next two months, choosing seats close to the ice." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 117, + 280, + 510, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 280, + 510, + 302 + ], + "spans": [ + { + "bbox": [ + 117, + 280, + 510, + 302 + ], + "type": "text", + "content": "89 Book passes for a nationally touring art exhibition or immersive art experience within the next two months, ensuring weekend availability." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 117, + 302, + 510, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 302, + 510, + 324 + ], + "spans": [ + { + "bbox": [ + 117, + 302, + 510, + 324 + ], + "type": "text", + "content": "90 Secure seats for a top-rated Broadway musical in New York City, making sure the date aligns with a Saturday evening performance." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 117, + 324, + 510, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 324, + 510, + 346 + ], + "spans": [ + { + "bbox": [ + 117, + 324, + 510, + 346 + ], + "type": "text", + "content": "91 Reserve a spot for a special museum or cultural center night event (e.g., \"Night at the Museum\" or themed after-hours) in a major U.S. city within the next two months." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 117, + 346, + 437, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 346, + 437, + 357 + ], + "spans": [ + { + "bbox": [ + 117, + 346, + 437, + 357 + ], + "type": "text", + "content": "92 Find the best deal on a new smartphone (latest model iPhone or Samsung)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 117, + 357, + 372, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 357, + 372, + 369 + ], + "spans": [ + { + "bbox": [ + 117, + 357, + 372, + 369 + ], + "type": "text", + "content": "93 Find the best dinner deal for two using food delivery apps" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 117, + 369, + 373, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 369, + 373, + 379 + ], + "spans": [ + { + "bbox": [ + 117, + 369, + 373, + 379 + ], + "type": "text", + "content": "94 Purchase an outfit for a formal event within a $150 budget" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 117, + 380, + 333, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 380, + 333, + 390 + ], + "spans": [ + { + "bbox": [ + 117, + 380, + 333, + 390 + ], + "type": "text", + "content": " 95 Buy a high-quality gaming chair for under $250" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 117, + 390, + 438, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 390, + 438, + 401 + ], + "spans": [ + { + "bbox": [ + 117, + 390, + 438, + 401 + ], + "type": "text", + "content": "96 Find and book the best available concert tickets for a top artist in your city" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 117, + 401, + 468, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 401, + 468, + 412 + ], + "spans": [ + { + "bbox": [ + 117, + 401, + 468, + 412 + ], + "type": "text", + "content": "97 Book tickets for a live theater 
performance and find a pre-show dinner reservation" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 117, + 412, + 363, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 412, + 363, + 423 + ], + "spans": [ + { + "bbox": [ + 117, + 412, + 363, + 423 + ], + "type": "text", + "content": "98 Plan a sports game outing for two within a $150 budget" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 117, + 423, + 358, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 423, + 358, + 434 + ], + "spans": [ + { + "bbox": [ + 117, + 423, + 358, + 434 + ], + "type": "text", + "content": "99 Plan a weekend getaway for two within a $500 budget" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 434, + 388, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 434, + 388, + 445 + ], + "spans": [ + { + "bbox": [ + 113, + 434, + 388, + 445 + ], + "type": "text", + "content": "100 Organize a one-day itinerary for a solo traveler in a major city" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 445, + 333, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 445, + 333, + 456 + ], + "spans": [ + { + "bbox": [ + 113, + 445, + 333, + 456 + ], + "type": "text", + "content": "101 Compare car rental options for a 5-day road trip" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 456, + 398, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 456, + 398, + 467 + ], + "spans": [ + { + "bbox": [ + 113, + 456, + 398, + 467 + ], + "type": "text", + "content": "102 Find and book a local escape room challenge for a group of four" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 467, + 358, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 467, + 358, + 477 + ], + "spans": [ + { + "bbox": [ + 113, + 467, + 358, + 477 + ], + "type": "text", + "content": "103 Plan a movie 
night with discounted tickets and snacks" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 477, + 400, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 477, + 400, + 489 + ], + "spans": [ + { + "bbox": [ + 113, + 477, + 400, + 489 + ], + "type": "text", + "content": "104 Find a highly-rated sushi restaurant and order a meal for delivery" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 489, + 368, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 489, + 368, + 499 + ], + "spans": [ + { + "bbox": [ + 113, + 489, + 368, + 499 + ], + "type": "text", + "content": "105 Plan a surprise birthday dinner at a fine dining restaurant" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 499, + 329, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 499, + 329, + 510 + ], + "spans": [ + { + "bbox": [ + 113, + 499, + 329, + 510 + ], + "type": "text", + "content": "106 Order a late-night snack under $15 for delivery" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 510, + 323, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 510, + 323, + 521 + ], + "spans": [ + { + "bbox": [ + 113, + 510, + 323, + 521 + ], + "type": "text", + "content": "107 Book a luxury hotel staycation for a weekend" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 153, + 563, + 458, + 696 + ], + "blocks": [ + { + "bbox": [ + 51, + 539, + 150, + 551 + ], + "lines": [ + { + "bbox": [ + 51, + 539, + 150, + 551 + ], + "spans": [ + { + "bbox": [ + 51, + 539, + 150, + 551 + ], + "type": "text", + "content": "Full List of Websites" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 153, + 563, + 458, + 696 + ], + "lines": [ + { + "bbox": [ + 153, + 563, + 458, + 696 + ], + "spans": [ + { + "bbox": [ + 153, + 563, + 458, + 696 + ], + "type": "table", + 
"html": "
NameURLTask Type
ACL Festivalaclfestival.comEntertainment
Amazonamazon.comShopping
Ammooraammoora.comEntertainment
Appleapple.comShopping
Artechouseartechouse.comEntertainment
Atom Ticketsatomtickets.comEntertainment
Best Buybestbuy.comShopping
Adidas Arenabilletterie.adidasarena.comEntertainment
Broadwaybroadway.comEntertainment
Charm City Clue Roomcharmcityclueroom.comEntertainment
City Passcitypass.comTravel Planning
CN Towercntower.caTravel Planning
Colorado Tourismcolorado.comTravel Planning
Corsaircorsair.comShopping
Coupon Followcouponfollow.comShopping
Crave 4Dcrave4d.comEntertainment
Dine Immersivedineimmersive.comFood
Disney Cruisedisneycruise.disney.go.comTravel Planning
DoorDashdoordash.comFood
Drone and DSLRdroneandslr.comShopping
Enterpriseenterprise.comTravel Planning
ESChartsescharts.comEntertainment
ETIXetix.comEntertainment
Eventbriteeventbrite.comEntertainment
Expediaexpedia.comTravel Planning
Fashion Week Onlinefashionweekonline.comEntertainment
Fever Upfeverup.comEntertainment
Googlegoogle.comTravel Planning
Google Mapsgoogle.com/mapsTravel Planning
Live Nationlivenation.comEntertainment
Library of Congressloc.govTravel Planning
LoL Esportslolesports.comEntertainment
MLBmlb.comEntertainment
MLB Ticketsmlbtickets.comEntertainment
NYICFFnyicff.orgEntertainment
OpenTableopentable.comFood
Postmatespostmates.comFood
Rakutenrakuten.comShopping
Redditredgit.comEntertainment
Retail Me Notretailmenot.comShopping
Road Trip USAroadtripusa.comTravel Planning
Samsungsamsung.comShopping
San Lorenzo DCsanlorenzodc.comFood
Screen Dailyscreendaily.comEntertainment
Secret Baltimoresecretbaltimore.comTravel Planning
Secret Labsecretlab.coShopping
Smithsonian Sleepoverssmithsoniansleepovers.orgEntertainment
StubHubstubhub.comEntertainment
The Bureau Fashion Weekthebureaufashionweek.comEntertainment
The Meltdownthemeltdown.comEntertainment
The UFLtheufl.comEntertainment
Ticketmasterticketmaster.comEntertainment
Ticketmaster Franceticketmaster.frEntertainment
Ticket Webticketweb.comEntertainment
TickPicktickpick.comEntertainment
TripAdvisortripadvisor.comTravel Planning
Two Step Inntwostepinn.comEntertainment
Two Step Inn Frontgatetwostepinn.frontgatetickets.comEntertainment
Uberuber.comTravel Planning
Uber Eatsubereats.comFood
Viatorviator.comTravel Planning
Vivid Seatsvividseats.comEntertainment
Washington Tourismwashington.orgTravel Planning
Yelpyelp.comFood
Zarazara.comShopping
", + "image_path": "f1bb03f90e27f5b8f5b31973278bf8c3eb46cfe6cc37a5093cd408495df0d85d.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "table_body" + } + ], + "index": 34 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 153, + 52, + 458, + 615 + ], + "blocks": [ + { + "bbox": [ + 153, + 52, + 458, + 615 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 54, + 133, + 67 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 54, + 133, + 67 + ], + "spans": [ + { + "bbox": [ + 52, + 54, + 133, + 67 + ], + "type": "text", + "content": "Word Frequency" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 68, + 294, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 68, + 294, + 135 + ], + "spans": [ + { + "bbox": [ + 50, + 68, + 294, + 135 + ], + "type": "text", + "content": "Figure 9 compares the most frequent instruction words in RealWebAssist with those from two common benchmarks, WebLINX and WebArena. The vocabulary used in RealWebAssist is more informal, as the dataset comes from natural spoken instructions. The tone is also more informal and conversational compared to WebLINX and WebArena." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 76, + 152, + 282, + 284 + ], + "blocks": [ + { + "bbox": [ + 76, + 152, + 282, + 284 + ], + "lines": [ + { + "bbox": [ + 76, + 152, + 282, + 284 + ], + "spans": [ + { + "bbox": [ + 76, + 152, + 282, + 284 + ], + "type": "image", + "image_path": "cf0f06fa68bc84e998eb72b04f0557365b08dfdb0d2f7b19b5f24754427b7de7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 297, + 293, + 330 + ], + "lines": [ + { + "bbox": [ + 50, + 297, + 293, + 330 + ], + "spans": [ + { + "bbox": [ + 50, + 297, + 293, + 330 + ], + "type": "text", + "content": "Figure 9: Word Cloud of the most frequent words in RealWebAssist v.s. common benchmarks WebLINX and WebArena." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 89, + 353, + 256, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 353, + 256, + 368 + ], + "spans": [ + { + "bbox": [ + 89, + 353, + 256, + 368 + ], + "type": "text", + "content": "Instructions for the participants" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 377, + 278, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 377, + 278, + 456 + ], + "spans": [ + { + "bbox": [ + 66, + 377, + 278, + 456 + ], + "type": "text", + "content": "Thank you for participating in our study! You'll be guiding another person who is controlling the computer on your behalf. Imagine you are helping a friend navigate a website remotely, giving step-by-step instructions to complete a task. Feel free to interpret the task as you see fit. 
Here are some guidelines to keep in mind:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 71, + 458, + 278, + 601 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 71, + 458, + 277, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 458, + 277, + 480 + ], + "spans": [ + { + "bbox": [ + 71, + 458, + 277, + 480 + ], + "type": "text", + "content": "- Give instructions as naturally as possible, just like you would in real life." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 71, + 483, + 277, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 483, + 277, + 505 + ], + "spans": [ + { + "bbox": [ + 71, + 483, + 277, + 505 + ], + "type": "text", + "content": "- You don't have to be overly precise—say what feels natural." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 71, + 507, + 278, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 507, + 278, + 541 + ], + "spans": [ + { + "bbox": [ + 71, + 507, + 278, + 541 + ], + "type": "text", + "content": "- You can only give one instruction at a time. After the operator follows your instruction, wait for them to complete it before giving the next step." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 71, + 543, + 278, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 543, + 278, + 576 + ], + "spans": [ + { + "bbox": [ + 71, + 543, + 278, + 576 + ], + "type": "text", + "content": "- Keep your instructions clear and concise, but don't stress too much about exact wording—just say what comes to mind!" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 578, + 277, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 578, + 277, + 601 + ], + "spans": [ + { + "bbox": [ + 71, + 578, + 277, + 601 + ], + "type": "text", + "content": "- You are allowed to instruct the operator to use Google to search for things." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 619, + 212, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 619, + 212, + 633 + ], + "spans": [ + { + "bbox": [ + 132, + 619, + 212, + 633 + ], + "type": "text", + "content": "Video Example" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 66, + 638, + 277, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 638, + 277, + 661 + ], + "spans": [ + { + "bbox": [ + 66, + 638, + 277, + 661 + ], + "type": "text", + "content": "A sample raw recording can be viewed via the link below (audio included)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 66, + 670, + 183, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 670, + 183, + 683 + ], + "spans": [ + { + "bbox": [ + 66, + 670, + 183, + 683 + ], + "type": "text", + "content": "https://youtu.be/CcyIt9tr5qo" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_content_list.json b/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..84a765edbabdf3145c410865ad5340772052dd8e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_content_list.json @@ -0,0 +1,1389 @@ +[ + { + "type": "text", + "text": "M1: Towards Scalable Test-Time Compute with Mamba Reasoning Models", + "text_level": 1, + "bbox": [ + 171, + 98, + 684, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Junxiong Wang $^{1}$ , Wen-Ding Li $^{2}$ , Daniele Paliotta $^{3*}$", + "bbox": [ + 316, + 171, + 683, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Daniel Ritter2, 
Alexander M. Rush2, Tri Dao1,4", + "bbox": [ + 330, + 193, + 666, + 210 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ TogetherAI, $^{2}$ Cornell University, $^{3}$ University of Geneva, $^{4}$ Princeton University", + "bbox": [ + 210, + 220, + 784, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 277, + 540, + 295 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Effective reasoning is crucial to solving complex mathematical problems. Recent large language models (LLMs) have boosted performance by scaling test-time computation through long chain-of-thought reasoning. However, transformer-based models are inherently limited in extending context length due to their quadratic computational complexity and linear memory requirements. In this paper, we introduce a novel hybrid linear RNN reasoning model, M1, built on the Mamba architecture, which allows memory-efficient inference. Our approach leverages a distillation process from existing reasoning models and is further enhanced through RL training. Experimental results on the AIME and MATH benchmarks show that M1 not only outperforms previous linear RNN models but also matches the performance of state-of-the-art Deepseek R1 distilled reasoning models at a similar scale. We also compare our generation speed with a highly performant general purpose inference engine, vLLM, and observe more than a 3x speedup compared to a same size transformer. With throughput speedup, we are able to achieve higher accuracy compared to DeepSeek R1 distilled transformer reasoning models under a fixed generation time budget using self-consistency voting. Overall, we introduce a hybrid Mamba reasoning model and provide a more effective approach to scaling test-time generation using self-consistency or long chain of thought reasoning. 
Code and pre-trained checkpoints are open-sourced at github.com/jxiw/M1.", + "bbox": [ + 228, + 314, + 769, + 607 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 642, + 320, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Robust and effective reasoning is the cornerstone for successfully performing tasks in domains such as mathematics and programming. Additionally, performance on reasoning tasks can often be boosted by generating longer sequences and/or generating many sequences in parallel (Snell et al., 2024). However, current transformer-based large language models (LLMs) face significant challenges when tasked with processing long sequences with large batch sizes. These models are constrained by a quadratic increase in computational complexity as the sequence length grows, coupled with a linear escalation in memory requirements. This combination makes it increasingly difficult for models to scale efficiently when handling large inputs.", + "bbox": [ + 169, + 676, + 826, + 805 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although linear hybrid RNN models (Gu & Dao, 2024; Dao & Gu, 2024; Beck et al., 2024; Yang et al., 2024; Peng et al., 2023) have shown great potential as an alternative to transformer-based on general language models, their effectiveness on reasoning tasks remains unclear. 
Since modern reasoning models typically generate long chains of thought for challenging math questions, it is uncertain whether the performance of hybrid linear RNNs diminishes in such scenarios.", + "bbox": [ + 169, + 809, + 828, + 893 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10449v3 [cs.LG] 9 Sep 2025", + "bbox": [ + 22, + 282, + 60, + 710 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Work done when interned at TogetherAI", + "bbox": [ + 189, + 909, + 467, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we propose M1 and show that it is possible to derive strong hybrid reasoning models by efficiently transferring reasoning capabilities from a large transformer model. Our training process involves distilling knowledge, incorporating math and reasoning abilities through supervised fine-tuning (SFT), and finally, boosting performance using reinforcement learning (RL) training. In total, the training process requires fewer than 50 billion tokens. In contrast, DeepSeek-R1-Distill-Qwen-1.5B is finetuned from Qwen2.5 MATH 1.5B which is trained using over 1 trillion MATH tokens on top of Qwen2.5.", + "bbox": [ + 169, + 103, + 823, + 202 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We demonstrate that our hybrid models achieve a 3x speedup compared to transformers of the same size when served using a highly performant general purpose inference engine, vLLM, at large batch sizes. This gain is mainly due to large batches and long sequences, decoding being generally memory-bound. Lower memory usage of hybrid models can transform this advantage into a speed gain. 
The decoding speedup is approximately linear with the volume of model's memory access (Yuan et al., 2025).", + "bbox": [ + 169, + 208, + 823, + 292 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Notably, this speedup can be converted to a gain in reasoning accuracy. Studies (Snell et al., 2024; Li, 2025; Chen et al., 2025) show that techniques such as self-consistency (Wang et al., 2023) and verification (Cobbe et al., 2021) at test time can significantly boost model reasoning performance. Under these conditions, a high-throughput model can further enhance its performance by generating more samples.", + "bbox": [ + 169, + 297, + 823, + 369 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The paper is organized as follows. Section 2 covers related work, Section 3 introduces our pipeline for distilling a hybrid reasoning model, and Section 4.1 presents our results evaluating M1 on math benchmarks. Sections 4.2 and 4.3 evaluate the performance gains of M1 in terms of both inference speed and scaling test-time compute. 
Section 5 provides some additional analysis of the impact of different generation lengths when training on RL, and of the impact of the different steps of the distillation pipeline we propose on performance.", + "bbox": [ + 169, + 375, + 823, + 460 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overall, we show that M1 performs on par with DeepSeek-R1-Distill-Qwen-1.5B, achieving scores of 82 on MATH500 (Hendrycks et al., 2021), 23 on AIME25 (MAA, 2025), 28 on AIME24 (MAA, 2024), and 47 on OlympiadBench (He et al., 2024), while offering $3 \\times$ faster inference throughput, even compared to the highly optimized vLLM (Kwon et al., 2023) implementation for Transformer models.", + "bbox": [ + 169, + 465, + 823, + 536 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 556, + 328, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Reasoning models", + "text_level": 1, + "bbox": [ + 171, + 589, + 351, + 604 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent models like Deepseek-R1 (DeepSeek-AI et al., 2025) have shown the potential of RL training to improve performance on verifiable reasoning tasks, such as math problem solving and programming. Additional work has proposed methods for inducing this reasoning behavior via supervised fine-tuning, either on curated data (Muennighoff et al., 2025) or on generated pairs of traces (Yang et al., 2025). Other approaches also combine search procedures such as MCTS with language models (Qi et al., 2024) or alter standard RL training schemes to control the length of generated outputs (Aggarwal & Welleck, 2025). After training, these models solve complex tasks by generating long chains of thought, which often include subtasks of the overall problem, multiple attempted solutions, and backtracking over prior attempts (Gandhi et al., 2025). 
Since the performance of these models, both during training and inference, relies on generating lengthy chains of thought, more efficient architectures can enable larger scale training and less costly generation.", + "bbox": [ + 169, + 614, + 826, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Enhancing Reasoning via Scaled Inference Compute", + "text_level": 1, + "bbox": [ + 169, + 800, + 602, + 815 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Increasing the computational budget during inference has become a promising approach to boost LLM performance. Methods like Chain of Thought (CoT) and its derivatives have achieved notable gains on reasoning benchmarks by breaking down complex tasks into intermediate steps (Wei et al., 2023; Yao et al., 2023). Although decomposing tasks improves reasoning, it also lengthens generation sequences and raises computational costs. Some recent studies even indicate that this extra computation might itself enhance model capabilities (Pfau et al., 2024). In addition, adaptive compute allocation during inference", + "bbox": [ + 169, + 825, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "has been explored. For example, Goyal et al. (2024) incorporated pause tokens into the vocabulary, allowing models to distribute compute more efficiently and improve both reasoning and overall task performance. LightTransfer (Zhang et al., 2024c) introduces a lightweight method that detects lazy layers and replaces their full attention with streaming attention—slashing KV-cache overhead and boosting throughput.", + "bbox": [ + 169, + 103, + 823, + 175 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Another strategy involves generating several outputs and selecting the best one. 
Researchers have developed various sampling algorithms to diversify and enhance the quality of generated responses, thereby increasing the chances of retrieving the most accurate answer (Wang et al., 2023; Renze & Guven, 2024; Zhang et al., 2023). Moreover, outcome and process reward models (ORMs and PRMs) have been introduced to evaluate responses and steer intermediate generation steps (Lightman et al., 2023; Zhang et al., 2024a; Luo et al., 2024; Uesato et al., 2022).", + "bbox": [ + 169, + 180, + 828, + 280 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent investigations reveal that, under fixed compute budgets, smaller LLMs augmented with inference-time compute techniques (such as majority voting or PRM-guided search) can outperform larger models (Snell et al., 2024; Wu et al., 2024; Beeching et al., 2024). However, these results are mainly confined to Transformer-based architectures, leaving open questions about whether similar scaling laws hold for subquadratic architectures, which offer faster inference but might compromise on expressiveness.", + "bbox": [ + 169, + 284, + 826, + 371 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 Alternatives to Transformer Architectures", + "text_level": 1, + "bbox": [ + 171, + 385, + 522, + 398 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Even though most reasoning models are based on the Transformer architecture (Grattaftori et al., 2024; Qwen et al., 2025), alternatives have been proposed to alleviate their high computational cost. Models built on top of RNNs (Beck et al., 2024; Peng et al., 2023), state space models (SSMs) (Gu et al., 2022; Gu & Dao, 2024), and linear attention mechanisms (Katharopoulos et al., 2020; Yang et al., 2024) demonstrate superior inference and memory efficiency, particularly for long-context tasks and large-batch generation. 
The Mamba series (Mamba-1 and Mamba-2) notably introduced selective state spaces to enable linear-time sequence modeling with strong performance (Gu & Dao, 2024; Dao & Gu, 2024). In addition, hybrid architectures that combine a few self-attention layers with subquadratic layers (e.g., Mamba) have emerged, showing advantages over both pure Transformer and pure subquadratic designs (Lieber et al., 2024; Ren et al., 2024). Such architectures are particularly suited to meet the high compute demands of inference-time scaling, and our work investigates their scaling properties.", + "bbox": [ + 169, + 411, + 828, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.4 Knowledge Distillation Strategies", + "text_level": 1, + "bbox": [ + 171, + 609, + 467, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Knowledge distillation has proven to be an effective means of transferring capabilities from large teacher models to smaller, more efficient student models (Hinton et al., 2015). In LLMs, this process compresses a larger pre-trained model into a more compact version while preserving core knowledge and functionality (Gu et al., 2024; Xu et al., 2024). Although larger models tend to exhibit superior reasoning abilities due to scaling properties (Xu et al., 2025; Wei et al., 2022), distillation techniques have enabled smaller models to achieve competitive reasoning performance (DeepSeek-AI et al., 2025; Labs, 2025). While most efforts have focused on intra-architecture distillation (e.g., Transformer-to-Transformer), recent studies have ventured into cross-architecture distillation. For instance, pretrained Transformers have been distilled into architectures such as RNNs (Kasai et al., 2021; Mercat et al., 2024), linear attention models (Zhang et al., 2024b; Zhang et al.), convolutional networks (Ralambomihanta et al., 2024), and SSMs (Bick et al., 2024; Wang et al., 2024b; Paliotta et al., 2025). 
Whether the robust reasoning abilities of Deepseek R1 (DeepSeek-AI et al., 2025) distilled models can be effectively transferred across different architectures remains an open question.", + "bbox": [ + 169, + 635, + 828, + 844 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 The M1 Reasoning Model", + "text_level": 1, + "bbox": [ + 171, + 863, + 439, + 882 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present a multi-stage process for building our hybrid linear RNN reasoning model, M1. The approach has three stages: distillation, SFT, and RL. We begin by", + "bbox": [ + 169, + 895, + 828, + 926 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Initializing MAMBAINLLAMA" + ], + "code_body": "1: Shapes: B - Batch, L - Length, D - embed size, $N = D$ /Attention_heads, $N'$ - expand \n2: Input: $o_t$ : (B, D) \n3: Output: output: (B, D) \n4: New Params: MLP, A \n5: for each head $\\mathbf{W}^K, \\mathbf{W}^Q, \\mathbf{W}^V, \\mathbf{W}^o : (N, D)$ after expanding to same dimension do \n6: Head Parameter: A : (N, $N'$ ) \n7: for all positions t: \n8: $x_t : (B, N) \\leftarrow \\mathbf{W}^V o_t$ \n9: $\\mathbf{B}_t : (B, N) \\leftarrow \\mathbf{W}^K o_t$ \n10: $\\mathbf{C}_t : (B, N) \\leftarrow \\mathbf{W}^Q o_t$ \n11: $\\Delta_t : (B, N') \\leftarrow \\mathrm{MLP}(x_t)$ \n12: $\\overline{\\mathbf{A}}_{1:T}, \\overline{\\mathbf{B}}_{1:T}, \\overline{\\mathbf{C}}_{1:T} : (B, N, N') \\leftarrow \\mathrm{DISC}(\\mathbf{A}, \\mathbf{B}, \\mathbf{C}, \\Delta)$ \n13: $y \\gets \\mathrm{LINEARRNN}(\\overline{\\mathbf{A}}, \\overline{\\mathbf{B}}, \\overline{\\mathbf{C}}, x)$ \n14: output $\\leftarrow$ output + $\\mathbf{W}^{OT} y$ \n15: end for \n16: return output", + "bbox": [ + 173, + 121, + 823, + 356 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": 
"distilling a Transformer model into a Mamba architecture, adapting the method of Wang et al. (2024a), which initializes the hybrid model's weights from a transformer model. We then perform math-specific supervised fine-tuning (SFT) on general mathematical datasets to enhance the model's mathematical performance, first without yet incorporating datasets generated by reasoning-focused models, and then with reasoning data leveraging multiple large-scale datasets generated by the R1 model series. Finally, we apply R1's GRPO method to further enhance the model's math reasoning capability.", + "bbox": [ + 169, + 380, + 826, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stage 1: Distillation. The first step in building our M1 model is distilling a pretrained transformer model into a Mamba model. We adapt the distillation approach introduced by Wang et al. (2024a).", + "bbox": [ + 169, + 494, + 823, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The MAMBAINLLAMA framework (Wang et al., 2024a) proposes distilling hybrid Transformer-Mamba models by reusing weights from attention layers. In this distillation procedure, outlined in Algorithm 1, linear projections for $\\mathbf{Q},\\mathbf{K},\\mathbf{V}$ , and $\\mathbf{O}$ are initialized from the corresponding projections for $\\mathbf{C},\\mathbf{B},\\mathbf{X}$ , and $\\mathbf{O}$ , respectively. The newly introduced parameters in the Mamba layers are the sampling rate $\\Delta$ and the dynamic parameter $\\mathbf{A}$ , which control the resulting Mamba module via a discretization function. Specifically, the sampling rate $\\Delta \\in \\mathbb{R}^{N'}$ discretizes $\\mathbf{B}_t,\\mathbf{C}_t \\in \\mathbb{R}^{N\\times 1}$ , yielding $\\overline{\\mathbf{B}}_t,\\overline{\\mathbf{C}}_t \\in \\mathbb{R}^{N'\\times N\\times 1}$ , as detailed in Algorithm 1. Different from Wang et al. 
(2024a), we introduce two additional linear layers to project from head.dim $*$ kv_head to head.dim $*$ n_head. This is because GQA (Ainslie et al., 2023) is used in the transformer model to reduce the KV cache. As Mamba does not utilize a KV cache, this expansion can increase the expressiveness of $\\mathbf{B}$ and $\\mathbf{X}$ .", + "bbox": [ + 169, + 542, + 826, + 702 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We directly reuse the MLP layers; however, unlike the original approach, we replace the attention layers with Mamba layers in a single step. Subsequently, we fine-tune the entire model to expedite the training process. The distillation step involves minimizing the token-level KL divergence, aligning the entire probability distribution of the student model, $p(\\cdot ;\\theta)$ , with the teacher model, $p(\\cdot ;\\theta_T)$ , for every candidate token at position $t$ . We use the reverse KL divergence, $D_{\\mathrm{KL}}(p(\\cdot ;\\theta)\\parallel p(\\cdot ;\\theta_T))$ , as our loss function rather than the forward KL divergence. We choose the reverse KL divergence due to its mode-seeking properties, which results in improved empirical performance.", + "bbox": [ + 169, + 705, + 828, + 821 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We reimplement the distillation and SFT framework using the Axolotl ${}^{1}$ training framework. We apply the model chat template,mask the user prompt,and compute the loss only over the tokens generated in the assistant's output. To speed up training,we use data packing to merge different sequences into a single one until we reach the maximum sequence length which is set to 8192 . 
We find that data packing achieves significantly better results compared", + "bbox": [ + 169, + 827, + 828, + 898 + ], + "page_idx": 3 + }, + { + "type": "footer", + "text": "1https://github.com/axolotl-ai-cloud/axolotl", + "bbox": [ + 189, + 909, + 519, + 922 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "to the non-packing version in distillation for the same training steps. We use the AdamW optimizer with learning rate $1 \\times 10^{-5}$ with cosine decay, $\\beta = (0.9, 0.95)$ and a weight decay of 0.1.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 2: SFT Following the distillation procedure, we finetune the model on a large set of math problems, OpenMathInstruct-2 (Toshniwal et al., 2024). As in the distillation stage, we apply the chat template to the prompts, mask the user prompt, and compute the loss only over the tokens generated in the assistant's output. We train for two epochs using the same optimizer as distillation.", + "bbox": [ + 169, + 165, + 823, + 236 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After the initial fine-tuning stage, we finetune on an additional set of math problems and solutions generated by reasoning models. We collect a mixed reasoning dataset, including OpenR1-Math-220k $^{2}$ , OpenThoughts-114k-math $^{3}$ , and ServiceNow-AI-R1-Distill $^{4}$ , Magpie-Reasoning-250K $^{5}$ for a total of 10B reasoning tokens. The first two datasets were generated from R1, while the last two was generated from the R1 distilled Qwen 32B model and R1 distilled Llama 70B model. We extended the training length to 24,576 because we found that it covers $99\\%$ of the data items. 
We train the model for five epochs using the same optimizer as before but changing the peak learning rate to $6 \\times 10^{-6}$ .", + "bbox": [ + 169, + 241, + 826, + 359 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 3: Reasoning RL. To further enhance performance, we integrate Mamba with a RL pipeline for further training.6 We use GRPO as the loss function. Differing from (Shao et al., 2024), we remove the KL penalty term as empirically we find it destabilizes training. Additionally, we include an entropy bonus to encourage a more diverse policy. The resulting formula is,", + "bbox": [ + 169, + 377, + 823, + 449 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}}} \\left[ \\frac {\\pi_ {\\theta} (a | s)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (a | s)} \\hat {A} (s, a) \\right] + \\eta H (\\pi_ {\\theta}) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 468, + 825, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{A}(s, a)$ is the estimate of the advantage from multiple rollouts. We use a batch size of 128 and a PPO batch size of 64, which also determines the number of PPO iterations, $\\mu = 2$ . We set the number of generations for each sequence to 8 and the maximum generation length to 32k. For optimization, we use the Adam optimizer with a learning rate of $1 \\times 10^{-6}$ . We train for 50 steps, and pick the best checkpoint with the highest critic reward. We append the simple prompt \"Let's think step by step and output the final answer within $\\backslash$ boxed{}\") to the end of each question in both training and evaluation.", + "bbox": [ + 169, + 517, + 823, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 169, + 638, + 318, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Model. 
We adopt the Llama3.2-3B-Instruct models as distillation target models. For Mamba layers, we set the SSM state size to 16. Consequently, the number of SSM groups after expansion is $3072 / 16 = 192$ for the 3B model. We use 6 interleaved attention layers among 28 total layers.", + "bbox": [ + 169, + 672, + 823, + 729 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation Dataset. Following common practice in evaluating reasoning models, we use a similar set of math benchmarks, including competition-level problems: MATH500 (Hendrycks et al., 2021), AIME25 (MAA, 2025), AIME24 (MAA, 2024), AMC23 (MAA, 2023), and OlympiadBench (He et al., 2024).", + "bbox": [ + 169, + 746, + 826, + 804 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "$^{2}$ https://huggingface.co/datasets/open-r1/OpenR1-Math-220k", + "3https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k", + "4https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT", + "5https://huggingface.co/datasets/Magpie-Align/Magpie-Reasoning-V2-250K-CoT-Deepseek-R1-Llama-70B", + "6We add it into the popular VeRL (Sheng et al., 2024) framework. In doing so, we addressed and resolved the CUDA graph incompatibility issues that previously arose during training with PyTorch's FSDP module. As a result, the updated framework now efficiently supports Mamba generation with CUDA graph enabled, making it $5 \\times$ faster than with CUDA Graph disabled" + ], + "bbox": [ + 173, + 815, + 906, + 924 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation Metrics. Our model's performance is assessed using two key metrics: coverage and accuracy. In fields such as coding and formal proofs, where answers can be automatically verified, coverage translates directly to enhanced performance and is widely utilized (Chen et al., 2021; Brown et al., 2024). 
Coverage is often measured using the pass@k metric, with $k$ indicating the number of samples per problem (Chen et al., 2021; Brown et al., 2024). This metric estimates the likelihood that at least one correct solution exists among the $k$ samples. To minimize variance when calculating coverage, we employ the unbiased estimation formula from Chen et al. (2021). Specifically, we generate $N \\geq k$ total samples per task. The probability that a correct solution exists among a pool of $k$ generated samples can then be determined given the total number of correct solutions $C_i$ for each task.", + "bbox": [ + 169, + 103, + 826, + 243 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\text {p a s s} @ \\mathrm {k} = \\frac {1}{\\# \\text {o f p r o b l e m s}} \\sum_ {i = 1} ^ {\\# \\text {o f p r o b l e m s}} \\left(1 - \\frac {\\binom {N - C _ {i}} {k}}{\\binom {N} {k}}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 312, + 260, + 679, + 303 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We implement this formula using a numerically stable approach as recommended by Chen et al. (2021).", + "bbox": [ + 169, + 314, + 823, + 344 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "When using additional compute, we employ multiple aggregation strategies. The most straightforward method is majority voting, also known as self-consistency decoding (Wang et al., 2023), which takes the majority response among $k$ samples as the predicted answer, and uses that to compute the accuracy.", + "bbox": [ + 169, + 349, + 826, + 406 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Reasoning Evaluation", + "text_level": 1, + "bbox": [ + 171, + 422, + 375, + 439 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/af7b5b0224891cd05c71fc00b371b74a039f51d72feb6613abeb470751bc0fd4.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelAIME25AIME24MATH500AMC23OlympiadBench
Qwen2.5-Math-7B-Instruct-13.379.850.640.7
rStar-Math-7B (Guan et al., 2025)-26.778.447.547.1
Eurus-2-7B-PRIME (Cui et al., 2025)-26.779.257.842.1
Qwen2.5-7B-SimpleRL (Zeng et al., 2025)-26.782.462.543.3
DeepSeek-R1-Qwen-1.5B23.028.882.862.943.3
M1-3B23.528.982.162.847.3
", + "bbox": [ + 173, + 455, + 820, + 551 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/0ef1f9ee797eae585e8e0f9c0e13c326b96d0b7932710e6595d305162836c6bd.jpg", + "table_caption": [ + "Table 1: Evaluation results for M1-3B, DeepSeek-R1-Distill-Qwen-1.5B and other MATH models on MATH benchmarks" + ], + "table_footnote": [], + "table_body": "
ModelAIME25AIME24MATH500AMC23OlympiadBench
Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32
DeepSeek-R1-Qwen-1.5B23.035.028.849.282.891.062.954.243.380.3
M1-3B23.534.629.050.582.191.862.855.047.380.1
", + "bbox": [ + 173, + 612, + 821, + 667 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Maj@32 results comparing M1-3B with DeepSeek-R1-Distill-Qwen-1.5B.", + "bbox": [ + 205, + 676, + 787, + 694 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate our models using a temperature setting of 0.7 and a sequence length of $32\\mathrm{k}$ with evaluation tools in VeRL. We use $32\\mathrm{k}$ because it has become the standard for evaluating performance on reasoning models (DeepSeek-AI et al., 2025; Luo et al., 2025). We report the pass@1 metric averaged over 64 runs; for majority voting, we repeat the metric calculation 100 times.", + "bbox": [ + 169, + 707, + 823, + 777 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We report the accuracy of M1-3B and DeepSeek-R1-Distill-Qwen-1.5B in Table 1 and 2. We use the baseline DeepSeek-R1-Distill-Qwen-1.5B since a 3B R1 reasoning model is still not available. Although M1-3B has more parameters than DeepSeek-R1-Distill-Qwen-1.5B, its speed is still comparable even with shorter contexts, so we believe this is a fair comparison. Our model's performance is competitive with state-of-the-art open reasoning models in the same model size range and outperforms larger nonreasoning math transformer models. Our model performs slightly worse on AIME24 compared to the DeepSeek-R1-Distill-Qwen-1.5B model. 
Notably, DeepSeek-R1-Distill-Qwen-1.5B is built on top of the Qwen2.5 MATH models, which were finetuned with over 1T MATH tokens on top of the Qwen2.5 models, significantly more training data than what M1-3B used in total.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Speed Evaluation", + "text_level": 1, + "bbox": [ + 171, + 103, + 344, + 118 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We benchmark inference time with our model against a transformer model (Llama-3.2.-3B (Grattafori et al., 2024)) of the same size. We use vLLM (version 0.6.3), which is the version used in VeRL for efficient rollouts. We also compare against DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI et al., 2025), a reasoning transformer model that is half the size of M1. This model has the same number of layers as the 3B parameter transformer, but the hidden dimension is half the size.", + "bbox": [ + 169, + 128, + 826, + 214 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "According to Luo et al. (2025), the average generation length of reasoning models on MATH questions is $4\\mathrm{k}$ to $5\\mathrm{k}$ . We therefore fix a decoding length of 4096 (and prompt length of 256) and benchmark our model across a range of batch sizes. We vary the batch size from 8 to 512, measuring the inference latency across different models.", + "bbox": [ + 169, + 220, + 823, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We perform our benchmarking on a single NVIDIA H100 GPU with greedy decoding. To ensure that every model generates up to the set maximum number of tokens, we use ignoreEOS=True. Before recording results, we warm up the system with two runs. The final performance metrics are then averaged over three subsequent runs. The inference speeds of the models across batch sizes are shown in Figure 1. 
M1 achieves a $3 \\times$ speedup over similarly-sized transformers when using a batch size of 512 and a decoding length of 4096, demonstrating its effectiveness in large-batch generation settings.", + "bbox": [ + 169, + 282, + 826, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The maximum length of generated sequences is also an important factor in RL training, as longer sequences allow the model to use more compute during learning by generating longer chains-of-thought, shown in Figure 5. To benchmark our model in this setting, we fix the batch size to 128, and vary the generation length. We compare against the same two models as in the batch size varying case, and the results are shown in Figure 2. As the generated sequence length increases, M1 achieves increasing speedups relative to the baseline models, and consistently generates at least $2x$ faster than Llama-3.2-3B (2.64x faster for the longest sequence length).", + "bbox": [ + 169, + 387, + 823, + 502 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/25786caaf4660f3a50a4304b56f28e47baa7bc387de66e47c6b53666bd7861e6.jpg", + "image_caption": [ + "Figure 1: Inference latency when using prompt length 256 and decoding length 4096." + ], + "image_footnote": [], + "bbox": [ + 176, + 513, + 486, + 654 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/44b9013e4fbeaff016b0fc6ef52a6593f403eb5688ad1a5169c408d03809e809.jpg", + "image_caption": [ + "Figure 2: Inference latency when using batch size 128." + ], + "image_footnote": [], + "bbox": [ + 513, + 513, + 821, + 655 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "It is well-known that LLM inference comprises a prefilling (compute-bound) and a decoding (memory-bound) stage. For math reasoning models, it is common to assume that decoding takes much longer than prefilling, since prefilling only uses a short MATH question, while decoding generates long answers. 
Under these settings, the process is memory-bound. Given that Mamba is highly memory-efficient and we only use a SSM state size of 16, these memory advantages translate into improved speed.", + "bbox": [ + 169, + 712, + 823, + 799 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Test-Time Scaling", + "text_level": 1, + "bbox": [ + 171, + 813, + 346, + 830 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Given a fixed time budget, M1 can generate more sequences or longer sequences compared to a transformer model, which can hopefully boost its performance. We evaluate the effect of test-time compute scaling on model performance. We scale both the number of samples generated as well as the length of generated samples, to see if M1 benefits from additional compute along these axes. We aim to investigate whether the speed benefit from section 4.2 can translate into an accuracy gain.", + "bbox": [ + 169, + 839, + 823, + 926 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Scaling with majority vote.", + "bbox": [ + 171, + 103, + 377, + 119 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6b1ca9db7bacc1c9c40422de2135914d4e17cd7faa8f82f90eb9c99c152f264f.jpg", + "image_caption": [ + "Figure 3: Number of samples vs. AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Both graphs include pass@1 and majority voting accuracies for M1 and DeepSeek-R1-Distill-Qwen-1.5B." 
+ ], + "image_footnote": [], + "bbox": [ + 205, + 150, + 485, + 286 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/be3f26d0becaf61488979db217d8ff3026af96e695f0ac6ae72c58686eacbfa9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 498, + 150, + 779, + 284 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The left side of Figure 3 shows the effect of scaling the number of generated samples (while fixing the maximum decoding length) on AIME25 accuracy. Both the baseline model and M1 see increasing accuracy as the number of samples increases, with M1 nearly matching the baseline performance for larger sample sizes. The efficient generation of M1 also means that generating large number of samples at test-time is faster than for the baseline transformer model.", + "bbox": [ + 169, + 356, + 823, + 439 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We quantify this efficiency in the right side of Figure 3, which compares the number of seconds spent generating samples against the resulting accuracy. To compute the time values on the x-axis, we find an optimal throughput value (in tokens per second) for each model by increasing batch sizes until throughput decreases. The optimal values were 7263 T/s for DeepSeek-R1-Distill-Qwen-1.5B, and 15169 T/s for M1. We then assume that each generated sample is maximum length (8K), and compute the seconds required for one sample from one model as 8K divided by the throughput. We then convert the left graph of Figure 3 into the right graph, by multiplying the number of samples for each datapoint by the seconds required per sample for each model. 
As an example, M1 requires roughly a half second (8K/15K) per sample, so the accuracy value for M1 at 32 samples on the left graph appears at approximately 16 seconds on the right graph.", + "bbox": [ + 169, + 446, + 826, + 602 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Scaling with longer sequences", + "text_level": 1, + "bbox": [ + 171, + 607, + 401, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 4 shows the effect of scaling the maximum length of the generated answer, while fixing the number of generated samples to one. For both the baseline and M1, increasing the maximum sequence length leads to increased accuracy, as shown in the left graph in Figure 4. After converting from generation length to the seconds required to generate (done in the same way as Figure 3, but dividing the generation length by throughput), we can see the accuracy gain per time spent generating on the right side of Figure 4. In this case, M1 actually gets a higher accuracy for the same amount of time spent generating at 4 of the 5 evaluated sequence lengths, showing the benefits of efficient generation for test-time compute scaling.", + "bbox": [ + 169, + 628, + 826, + 755 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Analysis", + "text_level": 1, + "bbox": [ + 171, + 777, + 284, + 795 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Increasing Training Length in RL boosts model performance", + "text_level": 1, + "bbox": [ + 171, + 811, + 624, + 827 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "With more efficient models, we can increase the length of sequences used in RL training, resulting in improved performance. Empirically, we see this in Figure 5, which shows an increase in accuracy on AIME25 as we scale up the length of sequences generated when training with GRPO. 
Training with sequences of maximum length 4096 results in accuracy below $10\\%$ , while allowing sequences up to length 24K boosts the accuracy up to $23\\%$ .", + "bbox": [ + 169, + 832, + 823, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "MATH Accuracy at each training stage", + "bbox": [ + 171, + 909, + 460, + 925 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b1e7d6ecf95fe62618e1249eae94f9acf09b3a8ec63f96d97012fbd3e875b444.jpg", + "image_caption": [ + "Figure 4: Generation length vs. AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Sampling for both models is done using a temperature of 0.8." + ], + "image_footnote": [], + "bbox": [ + 181, + 102, + 488, + 243 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8533bff62a3c4c790920a9ba838a5697f180ffd68c747d3f177580145292f4be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 102, + 820, + 243 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c2857ba74d1d22cbe528808f49d1edb1ad18d58f21f1c628b98c03710f9be0f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 359, + 329, + 635, + 484 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/e798954b5932a8f37901dd63dee3afc69153018f0f305c98d5378ffc4d993d86.jpg", + "table_caption": [ + "Figure 5: Pass@1 vs. maximum sequence length in GRPO training" + ], + "table_footnote": [], + "table_body": "
MATH500AIME24
Distill380
Distill + SFT(MATH)450
Distill + SFT(MATH) + SFT(Reason)7422
Distill + SFT(MATH) + SFT(Reason) + RL8228
", + "bbox": [ + 251, + 521, + 746, + 606 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 3: M1 Accuracy after each training stage on MATH500 and AIME24.", + "bbox": [ + 227, + 614, + 767, + 631 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To identify which components of our training pipeline have the greatest impact on performance, we also evaluate intermediate versions of the model on MATH500 (Hendrycks et al., 2021) and AIME24 (MAA, 2024). The results of these evaluations are presented in Table 3. Each step of the training pipeline provides a boost to performance, with particularly large gains from fine-tuning on solutions from reasoning models ( $+29\\%$ on MATH500 and $+17\\%$ on AIME24).", + "bbox": [ + 169, + 680, + 826, + 765 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Direct Distillation from Reasoning Models We also attempted to distill from Deepseek-R1-Qwen-1.5B instead of Llama-3.2-3B. In this case, we did not SFT on OpenMathInstruct, and instead only SFT on the 10B reasoning data that we collected after distillation. We found that the distilled model's performance was poor (38% and 3.3% pass@1 accuracy on MATH500 and AIME24, respectively). Our hypothesis for why this occurs is that 10B tokens is insufficient to effectively transfer reasoning skills from the transformer to Mamba. Although curating a high-quality reasoning dataset demands significant time and effort, we begin by leveraging the standard MATH distillation dataset from OpenMathInstruct (Toshniwal et al., 2024) to first distill a strong MATH model. We then transform this MATH model into a reasoning model via SFT on the dedicated reasoning dataset. 
This approach achieves strong performance with a much smaller number of reasoning tokens.", + "bbox": [ + 169, + 770, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 102, + 308, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this paper, we introduced M1, a hybrid reasoning model built on the Mamba architecture, designed to address the scalability challenges of the Transformer models. We demonstrated effective techniques for distillation and finetuning to develop M1, which achieves mathematical reasoning performance comparable to state-of-the-art reasoning models of similar size. Notably, M1 delivers over 3x faster inference than similar-sized Transformer models, even when using the heavily optimized vLLM inference engine, particularly at large batch sizes. This improved efficiency can make the resource-intensive inference-time strategies, such as self-consistency, more practical. Our findings establish M1 as a strong alternative to Transformer-based architectures, paving the way for more efficient and high-performing reasoning models.", + "bbox": [ + 169, + 133, + 826, + 273 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 294, + 274, + 310 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697.", + "Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebron, and Sumit Sanghai. Gqa: Training generalized multi-query transformer models from multi-head checkpoints. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 
4895-4901, 2023.", + "Maximilian Beck, Korbinian Poppel, Markus Spanring, Andreas Auer, Oleksandra Prudnikova, Michael Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. xlstm: Extended long short-term memory, 2024. URL https://arxiv.org/abs/2405.04517.", + "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute.", + "Aviv Bick, Kevin Y. Li, Eric P. Xing, J. Zico Kolter, and Albert Gu. Transformers to ssms: Distilling quadratic knowledge to subquadratic models, 2024. URL https://arxiv.org/abs/2408.10189.", + "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787.", + "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, and et. al. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374.", + "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567, 2025.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168.", + "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. 
Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025.", + "Tri Dao and Albert Gu. Transformers are ssms: Generalized models and efficient algorithms through structured state space duality, 2024. URL https://arxiv.org/abs/2405.21060." + ], + "bbox": [ + 173, + 316, + 828, + 925 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, and et. al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.", + "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307.", + "Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens, 2024. URL https://arxiv.org/abs/2310.02226.", + "Aaron Grattaftiori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, and et. al. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783.", + "Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces, 2024. URL https://arxiv.org/abs/2312.00752.", + "Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces, 2022. 
URL https://arxiv.org/abs/2111.00396.", + "Yuxian Gu, Li Dong, Furu Wei, and Minlie Huang. Minillm: Knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2306.08543.", + "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025.", + "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874.", + "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015. URL https://arxiv.org/abs/1503.02531.", + "Jungo Kasai, Hao Peng, Yizhe Zhang, Dani Yogatama, Gabriel Ilharco, Nikolaos Pappas, Yi Mao, Weizhu Chen, and Noah A. Smith. Finetuning pretrained transformers into mns, 2021. URL https://arxiv.org/abs/2103.13076.", + "Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention, 2020. URL https:// arxiv.org/abs/2006.16236.", + "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023.", + "Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. 
www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22.", + "Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks, 2025. URL https://arxiv.org/abs/2501.10069." + ], + "bbox": [ + 171, + 102, + 828, + 925 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Opher Lieber, Barak Lenz, Hofit Bata, Gal Cohen, Jhonathan Osin, Itay Dalmedigos, Erez Safahi, Shaked Meirom, Yonatan Belinkov, Shai Shalev-Shwartz, Omri Abend, Raz Alon, Tomer Asida, Amir Bergman, Roman Glozman, Michael Gokhman, Avashalom Manevich, Nir Ratner, Noam Rozen, Erez Shwartz, Mor Zusman, and Yoav Shoham. Jamba: A hybrid transformer-mamba language model, 2024. URL https://arxiv.org/abs/2403.19887.", + "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050.", + "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision, 2024. URL https://arxiv.org/abs/2406.06592.", + "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaler: Surpassing o1-preview with a 1.5b model by scaling rl. https://pretty-radio-b75.notion.site/ DeepScaleR-Surpassing-01-Preview-with-a-1-5B-Model-by-Scaling-RL-19681902c1468005bed8ca303013a4e2, 2025. Notion Blog.", + "MAA. American invitational mathematics examination 2023, 2023. 
URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt.", + "MAA. American invitationalal mathematics examination 2024, 2024. URL https://artofproblemsolving.com/wiki/index.php/American_Invitationalal_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnj0ZqM4qIqY3yGB3sBaqOaxwf_Xt.", + "MAA. American invitational mathematics examination 2025, 2025. URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmB0oqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt.", + "Jean Mercat, Igor Vasiljevic, Sedrick Keh, Kushal Arora, Achal Dave, Adrien Gaidon, and Thomas Kollar. Linearizing large language models, 2024. URL https://arxiv.org/abs/2405.06640.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candes, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393.", + "Daniele Paliotta, Junxiong Wang, Matteo Pagliardini, Kevin Y Li, Aviv Bick, J Zico Kolter, Albert Gu, François Fleuret, and Tri Dao. Thinking slow, fast: Scaling inference compute with distilled reasoners. arXiv preprint arXiv:2502.20339, 2025.", + "Bo Peng, Eric Alcaide, Quentin Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Chung, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Jiaju Lin, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, and et. al. Rwkv: Reinventing rnns for the transformer era, 2023. URL https://arxiv.org/abs/2305.13048.", + "Jacob Pfau, William Merrill, and Samuel R. Bowman. Let's think dot by dot: Hidden computation in transformer language models, 2024. URL https://arxiv.org/abs/2404.15758.", + "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. 
Mutual reasoning makes smaller llms stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195.", + "Qwen,:, An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, and et. al. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115." + ], + "bbox": [ + 173, + 102, + 998, + 924 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tokiniaina Raharison Ralambomihanta, Shahrad Mohammadzadeh, Mohammad Sami Nur Islam, Wassim Jabbour, and Laurence Liang. Scavenging hyena: Distilling transformers into long convolution models, 2024. URL https://arxiv.org/abs/2401.17574.", + "Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. Samba: Simple hybrid state space models for efficient unlimited context language modeling, 2024. URL https://arxiv.org/abs/2406.07522.", + "Matthew Renze and Erhan Guven. The effect of sampling temperature on problem solving in large language models, 2024. URL https://arxiv.org/abs/2402.05201.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. 
URL https:// arxiv.org/abs/2408.03314.", + "Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data, 2024. URL https://arxiv.org/abs/2410.01560.", + "Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback, 2022. URL https://arxiv.org/abs/2211.14275.", + "Junxiong Wang, Daniele Paliotta, Avner May, Alexander Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. Advances in Neural Information Processing Systems, 37:62432-62457, 2024a.", + "Junxiong Wang, Daniele Paliotta, Avner May, Alexander M. Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. arXiv preprint arXiv:2408.15237, 2024b.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171.", + "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models, 2022. URL https://arxiv.org/abs/2206.07682.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models, 2023. URL https://arxiv.org/abs/2201.11903.", + "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models, 2024. 
URL https://arxiv.org/abs/2408.00724.", + "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686." + ], + "bbox": [ + 173, + 102, + 826, + 924 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116.", + "Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. Gated linear attention transformers with hardware-efficient training, 2024. URL https://arxiv.org/abs/2312.06635.", + "Wang Yang, Hongye Jin, Jingfeng Yang, Vipin Chaudhary, and Xiaotian Han. Thinking preference optimization, 2025. URL https://arxiv.org/abs/2502.13173.", + "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models, 2023. URL https://arxiv.org/abs/2305.10601.", + "Jingyang Yuan, Huazuo Gao, Damai Dai, Junyu Luo, Liang Zhao, Zhengyan Zhang, Zhenda Xie, YX Wei, Lean Wang, Zhiping Xiao, et al. Native sparse attention: Hardware-aligned and natively trainable sparse attention. arXiv preprint arXiv:2502.11089, 2025.", + "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025. 
Notion Blog.", + "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Restmcts*: Llm self-training via process reward guided tree search, 2024a. URL https://arxiv.org/abs/2406.03816.", + "Michael Zhang, Simran Arora, Rahul Chalamala, Benjamin Frederick Spector, Alan Wu, Krithik Ramesh, Aaryan Singhal, and Christopher Re. Lolcats: On low-rank linearizing of large language models. In The Thirteenth International Conference on Learning Representations.", + "Michael Zhang, Kush Bhatia, Hermann Kumbong, and Christopher Ré. The hedgehog & the porcupine: Expressive linear attentions with softmax mimicry, 2024b. URL https://arxiv.org/abs/2402.04347.", + "Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B. Tenenbaum, and Chuang Gan. Planning with large language models for code generation, 2023. URL https://arxiv.org/abs/2303.05510.", + "Xuan Zhang, Fengzhuo Zhang, Cunxiao Du, Chao Du, Tianyu Pang, Wei Gao, and Min Lin. Lighttransfer: Your long-context llm is secretly a hybrid model with effortless adaptation. arXiv preprint arXiv:2410.13846, 2024c." + ], + "bbox": [ + 173, + 102, + 825, + 646 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A Limitations and Future Work", + "text_level": 1, + "bbox": [ + 173, + 670, + 468, + 686 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Speedup. Our current hybrid model is only $3 \\times$ faster than a Transformer of the same size when serving inference with vLLM. Recently, NVIDIA introduced a new hybrid Mamba kernel7, which could further boost the speed of hybrid models. Additionally, our attention implementation in hybrid models does not yet leverage the optimizations available in vLLM. Integrating M1 into vLLM could further boost performance by taking advantage of these attention speedups.", + "bbox": [ + 171, + 700, + 823, + 789 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Why do we not distill Qwen2.5 1.5B MATH model. 
We considered using the Qwen2.5 1.5B MATH Instruct model as the distillation target in the first stage. However, we found that the cross entropy loss of the Qwen 1.5B MATH model on the OpenMATH Instruct dataset (Toshniwal et al., 2024) exceeded 1.8, which is much higher than that of the Llama models (0.5). This suggests that, to mimic the Qwen2.5 model, we need a dataset generated from a large Qwen2.5 series model rather than this one generated from the Llama models. Dataset curation from Qwen Math models goes beyond the scope of this work.", + "bbox": [ + 171, + 801, + 823, + 901 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "7https://github.com/NVIDIA/Megatron-LM/commit/b957578e76a921209ef873cbbd389114a4042542", + "bbox": [ + 189, + 909, + 825, + 922 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Improvement on RL training speed. Recently, DeepSeek R1 (DeepSeek-AI et al., 2025) showed that reinforcement learning (RL) is a key component in improving model reasoning performance during post-training. Since then, recent research has predominantly relied on reinforcement learning (RL) as a training paradigm for reasoning models. However, training with RL requires the efficient generation of long sequences. For example, in VeRL (Sheng et al., 2024), the typical training batch size ranges from a few thousand to several thousand. DeepscaleR (Luo et al., 2025) also shows a significant accuracy boost when training RL with longer sequences, as it tends to enhance model performance by providing more steps for thorough reasoning. 
However, this shift towards reinforcement learning has resulted in the generation process becoming a significant bottleneck in reasoning model training, taking more than three times as long as the actor's weight update (forward + backward) according to the time profiling done for DeepscaleR (Luo et al., 2025). This need for efficient generation in RL presents a significant challenge for transformer models, namely due to the heavy computational burden imposed by large key-value caches during generation, especially for large batch sizes. Given their generation speed advantages, linear RNN models may be better suited for scaling RL training.", + "bbox": [ + 169, + 103, + 823, + 325 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 14 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_model.json b/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..028a9e9a0c8e4562ee1d51a9d5514b8d9ba24044 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_model.json @@ -0,0 +1,2144 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.284, + 0.061, + 0.712 + ], + "angle": 270, + "content": "arXiv:2504.10449v3 [cs.LG] 9 Sep 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.685, + 0.141 + ], + "angle": 0, + "content": "M1: Towards Scalable Test-Time Compute with Mamba Reasoning Models" + }, + { + "type": "text", + "bbox": [ + 0.317, + 0.172, + 0.684, + 0.19 + ], + "angle": 0, + "content": "Junxiong Wang\\(^{1}\\), Wen-Ding Li\\(^{2}\\), Daniele Paliotta\\(^{3*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.331, + 0.194, + 0.667, + 0.212 + ], + "angle": 0, + "content": "Daniel Ritter2, Alexander M. 
Rush2, Tri Dao1,4" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.221, + 0.785, + 0.239 + ], + "angle": 0, + "content": "\\(^{1}\\)TogetherAI, \\(^{2}\\)Cornell University, \\(^{3}\\)University of Geneva, \\(^{4}\\)Princeton University" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.279, + 0.542, + 0.296 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.315, + 0.77, + 0.608 + ], + "angle": 0, + "content": "Effective reasoning is crucial to solving complex mathematical problems. Recent large language models (LLMs) have boosted performance by scaling test-time computation through long chain-of-thought reasoning. However, transformer-based models are inherently limited in extending context length due to their quadratic computational complexity and linear memory requirements. In this paper, we introduce a novel hybrid linear RNN reasoning model, M1, built on the Mamba architecture, which allows memory-efficient inference. Our approach leverages a distillation process from existing reasoning models and is further enhanced through RL training. Experimental results on the AIME and MATH benchmarks show that M1 not only outperforms previous linear RNN models but also matches the performance of state-of-the-art Deepseek R1 distilled reasoning models at a similar scale. We also compare our generation speed with a highly performant general purpose inference engine, vLLM, and observe more than a 3x speedup compared to a same size transformer. With throughput speedup, we are able to achieve higher accuracy compared to DeepSeek R1 distilled transformer reasoning models under a fixed generation time budget using self-consistency voting. Overall, we introduce a hybrid Mamba reasoning model and provide a more effective approach to scaling test-time generation using self-consistency or long chain of thought reasoning. Code and pre-trained checkpoints are open-sourced at github.com/jxiw/M1." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.643, + 0.321, + 0.659 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.678, + 0.828, + 0.806 + ], + "angle": 0, + "content": "Robust and effective reasoning is the cornerstone for successfully performing tasks in domains such as mathematics and programming. Additionally, performance on reasoning tasks can often be boosted by generating longer sequences and/or generating many sequences in parallel (Snell et al., 2024). However, current transformer-based large language models (LLMs) face significant challenges when tasked with processing long sequences with large batch sizes. These models are constrained by a quadratic increase in computational complexity as the sequence length grows, coupled with a linear escalation in memory requirements. This combination makes it increasingly difficult for models to scale efficiently when handling large inputs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.81, + 0.829, + 0.895 + ], + "angle": 0, + "content": "Although linear hybrid RNN models (Gu & Dao, 2024; Dao & Gu, 2024; Beck et al., 2024; Yang et al., 2024; Peng et al., 2023) have shown great potential as an alternative to transformer-based on general language models, their effectiveness on reasoning tasks remains unclear. Since modern reasoning models typically generate long chains of thought for challenging math questions, it is uncertain whether the performance of hybrid linear RNNs diminishes in such scenarios." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.468, + 0.925 + ], + "angle": 0, + "content": "*Work done when interned at TogetherAI" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.203 + ], + "angle": 0, + "content": "In this paper, we propose M1 and show that it is possible to derive strong hybrid reasoning models by efficiently transferring reasoning capabilities from a large transformer model. Our training process involves distilling knowledge, incorporating math and reasoning abilities through supervised fine-tuning (SFT), and finally, boosting performance using reinforcement learning (RL) training. In total, the training process requires fewer than 50 billion tokens. In contrast, DeepSeek-R1-Distill-Qwen-1.5B is finetuned from Qwen2.5 MATH 1.5B which is trained using over 1 trillion MATH tokens on top of Qwen2.5." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.209, + 0.825, + 0.293 + ], + "angle": 0, + "content": "We demonstrate that our hybrid models achieve a 3x speedup compared to transformers of the same size when served using a highly performant general purpose inference engine, vLLM, at large batch sizes. This gain is mainly due to large batches and long sequences, decoding being generally memory-bound. Lower memory usage of hybrid models can transform this advantage into a speed gain. The decoding speedup is approximately linear with the volume of model's memory access (Yuan et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.299, + 0.825, + 0.371 + ], + "angle": 0, + "content": "Notably, this speedup can be converted to a gain in reasoning accuracy. 
Studies (Snell et al., 2024; Li, 2025; Chen et al., 2025) show that techniques such as self-consistency (Wang et al., 2023) and verification (Cobbe et al., 2021) at test time can significantly boost model reasoning performance. Under these conditions, a high-throughput model can further enhance its performance by generating more samples." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.376, + 0.825, + 0.462 + ], + "angle": 0, + "content": "The paper is organized as follows. Section 2 covers related work, Section 3 introduces our pipeline for distilling a hybrid reasoning model, and Section 4.1 presents our results evaluating M1 on math benchmarks. Sections 4.2 and 4.3 evaluate the performance gains of M1 in terms of both inference speed and scaling test-time compute. Section 5 provides some additional analysis of the impact of different generation lengths when training on RL, and of the impact of the different steps of the distillation pipeline we propose on performance." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.466, + 0.825, + 0.537 + ], + "angle": 0, + "content": "Overall, we show that M1 performs on par with DeepSeek-R1-Distill-Qwen-1.5B, achieving scores of 82 on MATH500 (Hendrycks et al., 2021), 23 on AIME25 (MAA, 2025), 28 on AIME24 (MAA, 2024), and 47 on OlympiadBench (He et al., 2024), while offering \\(3 \\times\\) faster inference throughput, even compared to the highly optimized vLLM (Kwon et al., 2023) implementation for Transformer models." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.557, + 0.329, + 0.573 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.59, + 0.352, + 0.605 + ], + "angle": 0, + "content": "2.1 Reasoning models" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.615, + 0.827, + 0.784 + ], + "angle": 0, + "content": "Recent models like Deepseek-R1 (DeepSeek-AI et al., 2025) have shown the potential of RL training to improve performance on verifiable reasoning tasks, such as math problem solving and programming. Additional work has proposed methods for inducing this reasoning behavior via supervised fine-tuning, either on curated data (Muennighoff et al., 2025) or on generated pairs of traces (Yang et al., 2025). Other approaches also combine search procedures such as MCTS with language models (Qi et al., 2024) or alter standard RL training schemes to control the length of generated outputs (Aggarwal & Welleck, 2025). After training, these models solve complex tasks by generating long chains of thought, which often include subtasks of the overall problem, multiple attempted solutions, and backtracking over prior attempts (Gandhi et al., 2025). Since the performance of these models, both during training and inference, relies on generating lengthy chains of thought, more efficient architectures can enable larger scale training and less costly generation." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.801, + 0.603, + 0.816 + ], + "angle": 0, + "content": "2.2 Enhancing Reasoning via Scaled Inference Compute" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Increasing the computational budget during inference has become a promising approach to boost LLM performance. Methods like Chain of Thought (CoT) and its derivatives have achieved notable gains on reasoning benchmarks by breaking down complex tasks into intermediate steps (Wei et al., 2023; Yao et al., 2023). 
Although decomposing tasks improves reasoning, it also lengthens generation sequences and raises computational costs. Some recent studies even indicate that this extra computation might itself enhance model capabilities (Pfau et al., 2024). In addition, adaptive compute allocation during inference" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.176 + ], + "angle": 0, + "content": "has been explored. For example, Goyal et al. (2024) incorporated pause tokens into the vocabulary, allowing models to distribute compute more efficiently and improve both reasoning and overall task performance. LightTransfer (Zhang et al., 2024c) introduces a lightweight method that detects lazy layers and replaces their full attention with streaming attention—slashing KV-cache overhead and boosting throughput." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.181, + 0.829, + 0.281 + ], + "angle": 0, + "content": "Another strategy involves generating several outputs and selecting the best one. Researchers have developed various sampling algorithms to diversify and enhance the quality of generated responses, thereby increasing the chances of retrieving the most accurate answer (Wang et al., 2023; Renze & Guven, 2024; Zhang et al., 2023). Moreover, outcome and process reward models (ORMs and PRMs) have been introduced to evaluate responses and steer intermediate generation steps (Lightman et al., 2023; Zhang et al., 2024a; Luo et al., 2024; Uesato et al., 2022)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.285, + 0.828, + 0.372 + ], + "angle": 0, + "content": "Recent investigations reveal that, under fixed compute budgets, smaller LLMs augmented with inference-time compute techniques (such as majority voting or PRM-guided search) can outperform larger models (Snell et al., 2024; Wu et al., 2024; Beeching et al., 2024). 
However, these results are mainly confined to Transformer-based architectures, leaving open questions about whether similar scaling laws hold for subquadratic architectures, which offer faster inference but might compromise on expressiveness." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.386, + 0.523, + 0.4 + ], + "angle": 0, + "content": "2.3 Alternatives to Transformer Architectures" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.412, + 0.829, + 0.595 + ], + "angle": 0, + "content": "Even though most reasoning models are based on the Transformer architecture (Grattaftori et al., 2024; Qwen et al., 2025), alternatives have been proposed to alleviate their high computational cost. Models built on top of RNNs (Beck et al., 2024; Peng et al., 2023), state space models (SSMs) (Gu et al., 2022; Gu & Dao, 2024), and linear attention mechanisms (Katharopoulos et al., 2020; Yang et al., 2024) demonstrate superior inference and memory efficiency, particularly for long-context tasks and large-batch generation. The Mamba series (Mamba-1 and Mamba-2) notably introduced selective state spaces to enable linear-time sequence modeling with strong performance (Gu & Dao, 2024; Dao & Gu, 2024). In addition, hybrid architectures that combine a few self-attention layers with subquadratic layers (e.g., Mamba) have emerged, showing advantages over both pure Transformer and pure subquadratic designs (Lieber et al., 2024; Ren et al., 2024). Such architectures are particularly suited to meet the high compute demands of inference-time scaling, and our work investigates their scaling properties." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.61, + 0.468, + 0.625 + ], + "angle": 0, + "content": "2.4 Knowledge Distillation Strategies" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.636, + 0.829, + 0.845 + ], + "angle": 0, + "content": "Knowledge distillation has proven to be an effective means of transferring capabilities from large teacher models to smaller, more efficient student models (Hinton et al., 2015). In LLMs, this process compresses a larger pre-trained model into a more compact version while preserving core knowledge and functionality (Gu et al., 2024; Xu et al., 2024). Although larger models tend to exhibit superior reasoning abilities due to scaling properties (Xu et al., 2025; Wei et al., 2022), distillation techniques have enabled smaller models to achieve competitive reasoning performance (DeepSeek-AI et al., 2025; Labs, 2025). While most efforts have focused on intra-architecture distillation (e.g., Transformer-to-Transformer), recent studies have ventured into cross-architecture distillation. For instance, pretrained Transformers have been distilled into architectures such as RNNs (Kasai et al., 2021; Mercat et al., 2024), linear attention models (Zhang et al., 2024b; Zhang et al.), convolutional networks (Ralambomihanta et al., 2024), and SSMs (Bick et al., 2024; Wang et al., 2024b; Paliotta et al., 2025). Whether the robust reasoning abilities of Deepseek R1 (DeepSeek-AI et al., 2025) distilled models can be effectively transferred across different architectures remains an open question." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.864, + 0.44, + 0.883 + ], + "angle": 0, + "content": "3 The M1 Reasoning Model" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.896, + 0.829, + 0.927 + ], + "angle": 0, + "content": "In this section, we present a multi-stage process for building our hybrid linear RNN reasoning model, M1. The approach has three stages: distillation, SFT, and RL. 
We begin by" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.104, + 0.486, + 0.12 + ], + "angle": 0, + "content": "Algorithm 1 Initializing MAMBAINLLAMA" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.122, + 0.825, + 0.357 + ], + "angle": 0, + "content": "1: Shapes: B - Batch, L - Length, D - embed size, \\(N = D\\)/Attention_heads, \\(N'\\) - expand \n2: Input: \\(o_t\\): (B, D) \n3: Output: output: (B, D) \n4: New Params: MLP, A \n5: for each head \\(\\mathbf{W}^K, \\mathbf{W}^Q, \\mathbf{W}^V, \\mathbf{W}^o : (N, D)\\) after expanding to same dimension do \n6: Head Parameter: A : (N, \\(N'\\)) \n7: for all positions t: \n8: \\(x_t : (B, N) \\leftarrow \\mathbf{W}^V o_t\\) \n9: \\(\\mathbf{B}_t : (B, N) \\leftarrow \\mathbf{W}^K o_t\\) \n10: \\(\\mathbf{C}_t : (B, N) \\leftarrow \\mathbf{W}^Q o_t\\) \n11: \\(\\Delta_t : (B, N') \\leftarrow \\mathrm{MLP}(x_t)\\) \n12: \\(\\overline{\\mathbf{A}}_{1:T}, \\overline{\\mathbf{B}}_{1:T}, \\overline{\\mathbf{C}}_{1:T} : (B, N, N') \\leftarrow \\mathrm{DISC}(\\mathbf{A}, \\mathbf{B}, \\mathbf{C}, \\Delta)\\) \n13: \\(y \\gets \\mathrm{LINEARRNN}(\\overline{\\mathbf{A}}, \\overline{\\mathbf{B}}, \\overline{\\mathbf{C}}, x)\\) \n14: output \\(\\leftarrow\\) output + \\(\\mathbf{W}^{OT} y\\) \n15: end for \n16: return output" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.381, + 0.828, + 0.482 + ], + "angle": 0, + "content": "distilling a Transformer model into a Mamba architecture, adapting the method of Wang et al. (2024a), which initializes the hybrid model's weights from a transformer model. 
We then perform math-specific supervised fine-tuning (SFT) on general mathematical datasets to enhance the model's mathematical performance, first without yet incorporating datasets generated by reasoning-focused models, and then with reasoning data leveraging multiple large-scale datasets generated by the R1 model series. Finally, we apply R1's GRPO method to further enhance the model's math reasoning capability." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.495, + 0.825, + 0.54 + ], + "angle": 0, + "content": "Stage 1: Distillation. The first step in building our M1 model is distilling a pretrained transformer model into a Mamba model. We adapt the distillation approach introduced by Wang et al. (2024a)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.544, + 0.828, + 0.703 + ], + "angle": 0, + "content": "The MAMBAINLLAMA framework (Wang et al., 2024a) proposes distilling hybrid Transformer-Mamba models by reusing weights from attention layers. In this distillation procedure, outlined in Algorithm 1, linear projections for \\(\\mathbf{Q},\\mathbf{K},\\mathbf{V}\\), and \\(\\mathbf{O}\\) are initialized from the corresponding projections for \\(\\mathbf{C},\\mathbf{B},\\mathbf{X}\\), and \\(\\mathbf{O}\\), respectively. The newly introduced parameters in the Mamba layers are the sampling rate \\(\\Delta\\) and the dynamic parameter \\(\\mathbf{A}\\), which control the resulting Mamba module via a discretization function. Specifically, the sampling rate \\(\\Delta \\in \\mathbb{R}^{N'}\\) discretizes \\(\\mathbf{B}_t,\\mathbf{C}_t \\in \\mathbb{R}^{N\\times 1}\\), yielding \\(\\overline{\\mathbf{B}}_t,\\overline{\\mathbf{C}}_t \\in \\mathbb{R}^{N'\\times N\\times 1}\\), as detailed in Algorithm 1. Different from Wang et al. (2024a), we introduce two additional linear layers to project from head.dim \\(*\\) kv_head to head.dim \\(*\\) n_head. This is because GQA (Ainslie et al., 2023) is used in the transformer model to reduce the KV cache. 
As Mamba does not utilize a KV cache, this expansion can increase the expressiveness of \\(\\mathbf{B}\\) and \\(\\mathbf{X}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.707, + 0.829, + 0.822 + ], + "angle": 0, + "content": "We directly reuse the MLP layers; however, unlike the original approach, we replace the attention layers with Mamba layers in a single step. Subsequently, we fine-tune the entire model to expedite the training process. The distillation step involves minimizing the token-level KL divergence, aligning the entire probability distribution of the student model, \\( p(\\cdot ;\\theta) \\), with the teacher model, \\( p(\\cdot ;\\theta_T) \\), for every candidate token at position \\( t \\). We use the reverse KL divergence, \\( D_{\\mathrm{KL}}(p(\\cdot ;\\theta)\\parallel p(\\cdot ;\\theta_T)) \\), as our loss function rather than the forward KL divergence. We choose the reverse KL divergence due to its mode-seeking properties, which results in improved empirical performance." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.828, + 0.829, + 0.9 + ], + "angle": 0, + "content": "We reimplement the distillation and SFT framework using the Axolotl \\( {}^{1} \\) training framework. We apply the model chat template,mask the user prompt,and compute the loss only over the tokens generated in the assistant's output. To speed up training,we use data packing to merge different sequences into a single one until we reach the maximum sequence length which is set to 8192 . 
We find that data packing achieves significantly better results compared" + }, + { + "type": "footer", + "bbox": [ + 0.191, + 0.91, + 0.521, + 0.924 + ], + "angle": 0, + "content": "1https://github.com/axolotl-ai-cloud/axolotl" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "to the non-packing version in distillation for the same training steps. We use the AdamW optimizer with learning rate \\(1 \\times 10^{-5}\\) with cosine decay, \\(\\beta = (0.9, 0.95)\\) and a weight decay of 0.1." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.166, + 0.825, + 0.237 + ], + "angle": 0, + "content": "Stage 2: SFT Following the distillation procedure, we finetune the model on a large set of math problems, OpenMathInstruct-2 (Toshniwal et al., 2024). As in the distillation stage, we apply the chat template to the prompts, mask the user prompt, and compute the loss only over the tokens generated in the assistant's output. We train for two epochs using the same optimizer as distillation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.242, + 0.827, + 0.361 + ], + "angle": 0, + "content": "After the initial fine-tuning stage, we finetune on an additional set of math problems and solutions generated by reasoning models. We collect a mixed reasoning dataset, including OpenR1-Math-220k\\(^{2}\\), OpenThoughts-114k-math\\(^{3}\\), and ServiceNow-AI-R1-Distill\\(^{4}\\), Magpie-Reasoning-250K\\(^{5}\\) for a total of 10B reasoning tokens. The first two datasets were generated from R1, while the last two was generated from the R1 distilled Qwen 32B model and R1 distilled Llama 70B model. We extended the training length to 24,576 because we found that it covers \\(99\\%\\) of the data items. 
We train the model for five epochs using the same optimizer as before but changing the peak learning rate to \\(6 \\times 10^{-6}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.378, + 0.825, + 0.45 + ], + "angle": 0, + "content": "Stage 3: Reasoning RL. To further enhance performance, we integrate Mamba with a RL pipeline for further training.6 We use GRPO as the loss function. Differing from (Shao et al., 2024), we remove the KL penalty term as empirically we find it destabilizes training. Additionally, we include an entropy bonus to encourage a more diverse policy. The resulting formula is," + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.469, + 0.826, + 0.504 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}}} \\left[ \\frac {\\pi_ {\\theta} (a | s)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (a | s)} \\hat {A} (s, a) \\right] + \\eta H (\\pi_ {\\theta}) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.518, + 0.825, + 0.62 + ], + "angle": 0, + "content": "where \\(\\hat{A}(s, a)\\) is the estimate of the advantage from multiple rollouts. We use a batch size of 128 and a PPO batch size of 64, which also determines the number of PPO iterations, \\(\\mu = 2\\). We set the number of generations for each sequence to 8 and the maximum generation length to 32k. For optimization, we use the Adam optimizer with a learning rate of \\(1 \\times 10^{-6}\\). We train for 50 steps, and pick the best checkpoint with the highest critic reward. We append the simple prompt \"Let's think step by step and output the final answer within \\(\\backslash\\)boxed{}\") to the end of each question in both training and evaluation." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.64, + 0.319, + 0.658 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.673, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Model. 
We adopt the Llama3.2-3B-Instruct models as distillation target models. For Mamba layers, we set the SSM state size to 16. Consequently, the number of SSM groups after expansion is \\( 3072 / 16 = 192 \\) for the 3B model. We use 6 interleaved attention layers among 28 total layers." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.747, + 0.827, + 0.805 + ], + "angle": 0, + "content": "Evaluation Dataset. Following common practice in evaluating reasoning models, we use a similar set of math benchmarks, including competition-level problems: MATH500 (Hendrycks et al., 2021), AIME25 (MAA, 2025), AIME24 (MAA, 2024), AMC23 (MAA, 2023), and OlympiadBench (He et al., 2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.816, + 0.617, + 0.83 + ], + "angle": 0, + "content": "\\(^{2}\\)https://huggingface.co/datasets/open-r1/OpenR1-Math-220k" + }, + { + "type": "ref_text", + "bbox": [ + 0.193, + 0.831, + 0.666, + 0.844 + ], + "angle": 0, + "content": "3https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k" + }, + { + "type": "ref_text", + "bbox": [ + 0.194, + 0.845, + 0.646, + 0.858 + ], + "angle": 0, + "content": "4https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT" + }, + { + "type": "ref_text", + "bbox": [ + 0.194, + 0.86, + 0.907, + 0.873 + ], + "angle": 0, + "content": "5https://huggingface.co/datasets/Magpie-Align/Magpie-Reasoning-V2-250K-CoT-Deepseek-R1-Llama-70B" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.874, + 0.825, + 0.925 + ], + "angle": 0, + "content": "6We add it into the popular VeRL (Sheng et al., 2024) framework. In doing so, we addressed and resolved the CUDA graph incompatibility issues that previously arose during training with PyTorch's FSDP module. 
As a result, the updated framework now efficiently supports Mamba generation with CUDA graph enabled, making it \\(5 \\times\\) faster than with CUDA Graph disabled" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.816, + 0.907, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.244 + ], + "angle": 0, + "content": "Evaluation Metrics. Our model's performance is assessed using two key metrics: coverage and accuracy. In fields such as coding and formal proofs, where answers can be automatically verified, coverage translates directly to enhanced performance and is widely utilized (Chen et al., 2021; Brown et al., 2024). Coverage is often measured using the pass@k metric, with \\( k \\) indicating the number of samples per problem (Chen et al., 2021; Brown et al., 2024). This metric estimates the likelihood that at least one correct solution exists among the \\( k \\) samples. To minimize variance when calculating coverage, we employ the unbiased estimation formula from Chen et al. (2021). Specifically, we generate \\( N \\geq k \\) total samples per task. The probability that a correct solution exists among a pool of \\( k \\) generated samples can then be determined given the total number of correct solutions \\( C_i \\) for each task." + }, + { + "type": "equation", + "bbox": [ + 0.313, + 0.261, + 0.68, + 0.304 + ], + "angle": 0, + "content": "\\[\n\\text {p a s s} @ \\mathrm {k} = \\frac {1}{\\# \\text {o f p r o b l e m s}} \\sum_ {i = 1} ^ {\\# \\text {o f p r o b l e m s}} \\left(1 - \\frac {\\binom {N - C _ {i}} {k}}{\\binom {N} {k}}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.315, + 0.825, + 0.345 + ], + "angle": 0, + "content": "We implement this formula using a numerically stable approach as recommended by Chen et al. (2021)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.35, + 0.827, + 0.407 + ], + "angle": 0, + "content": "When using additional compute, we employ multiple aggregation strategies. The most straightforward method is majority voting, also known as self-consistency decoding (Wang et al., 2023), which takes the majority response among \\(k\\) samples as the predicted answer, and uses that to compute the accuracy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.424, + 0.377, + 0.44 + ], + "angle": 0, + "content": "4.1 Reasoning Evaluation" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.457, + 0.821, + 0.553 + ], + "angle": 0, + "content": "
ModelAIME25AIME24MATH500AMC23OlympiadBench
Qwen2.5-Math-7B-Instruct-13.379.850.640.7
rStar-Math-7B (Guan et al., 2025)-26.778.447.547.1
Eurus-2-7B-PRIME (Cui et al., 2025)-26.779.257.842.1
Qwen2.5-7B-SimpleRL (Zeng et al., 2025)-26.782.462.543.3
DeepSeek-R1-Qwen-1.5B23.028.882.862.943.3
M1-3B23.528.982.162.847.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.562, + 0.825, + 0.59 + ], + "angle": 0, + "content": "Table 1: Evaluation results for M1-3B, DeepSeek-R1-Distill-Qwen-1.5B and other MATH models on MATH benchmarks" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.613, + 0.822, + 0.669 + ], + "angle": 0, + "content": "
ModelAIME25AIME24MATH500AMC23OlympiadBench
Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32
DeepSeek-R1-Qwen-1.5B23.035.028.849.282.891.062.954.243.380.3
M1-3B23.534.629.050.582.191.862.855.047.380.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.207, + 0.678, + 0.788, + 0.695 + ], + "angle": 0, + "content": "Table 2: Maj@32 results comparing M1-3B with DeepSeek-R1-Distill-Qwen-1.5B." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.708, + 0.825, + 0.779 + ], + "angle": 0, + "content": "We evaluate our models using a temperature setting of 0.7 and a sequence length of \\(32\\mathrm{k}\\) with evaluation tools in VeRL. We use \\(32\\mathrm{k}\\) because it has become the standard for evaluating performance on reasoning models (DeepSeek-AI et al., 2025; Luo et al., 2025). We report the pass@1 metric averaged over 64 runs; for majority voting, we repeat the metric calculation 100 times." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We report the accuracy of M1-3B and DeepSeek-R1-Distill-Qwen-1.5B in Table 1 and 2. We use the baseline DeepSeek-R1-Distill-Qwen-1.5B since a 3B R1 reasoning model is still not available. Although M1-3B has more parameters than DeepSeek-R1-Distill-Qwen-1.5B, its speed is still comparable even with shorter contexts, so we believe this is a fair comparison. Our model's performance is competitive with state-of-the-art open reasoning models in the same model size range and outperforms larger nonreasoning math transformer models. Our model performs slightly worse on AIME24 compared to the DeepSeek-R1-Distill-Qwen-1.5B model. Notably, DeepSeek-R1-Distill-Qwen-1.5B is built on top of the Qwen2.5 MATH models, which were finetuned with over 1T MATH tokens on top of the Qwen2.5 models, significantly more training data than what M1-3B used in total." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.345, + 0.119 + ], + "angle": 0, + "content": "4.2 Speed Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.129, + 0.828, + 0.215 + ], + "angle": 0, + "content": "We benchmark inference time with our model against a transformer model (Llama-3.2.-3B (Grattafori et al., 2024)) of the same size. We use vLLM (version 0.6.3), which is the version used in VeRL for efficient rollouts. We also compare against DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI et al., 2025), a reasoning transformer model that is half the size of M1. This model has the same number of layers as the 3B parameter transformer, but the hidden dimension is half the size." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.221, + 0.825, + 0.279 + ], + "angle": 0, + "content": "According to Luo et al. (2025), the average generation length of reasoning models on MATH questions is \\(4\\mathrm{k}\\) to \\(5\\mathrm{k}\\). We therefore fix a decoding length of 4096 (and prompt length of 256) and benchmark our model across a range of batch sizes. We vary the batch size from 8 to 512, measuring the inference latency across different models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.283, + 0.827, + 0.385 + ], + "angle": 0, + "content": "We perform our benchmarking on a single NVIDIA H100 GPU with greedy decoding. To ensure that every model generates up to the set maximum number of tokens, we use ignoreEOS=True. Before recording results, we warm up the system with two runs. The final performance metrics are then averaged over three subsequent runs. The inference speeds of the models across batch sizes are shown in Figure 1. 
M1 achieves a \\(3 \\times\\) speedup over similarly-sized transformers when using a batch size of 512 and a decoding length of 4096, demonstrating its effectiveness in large-batch generation settings." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.388, + 0.825, + 0.503 + ], + "angle": 0, + "content": "The maximum length of generated sequences is also an important factor in RL training, as longer sequences allow the model to use more compute during learning by generating longer chains-of-thought, shown in Figure 5. To benchmark our model in this setting, we fix the batch size to 128, and vary the generation length. We compare against the same two models as in the batch size varying case, and the results are shown in Figure 2. As the generated sequence length increases, M1 achieves increasing speedups relative to the baseline models, and consistently generates at least \\(2x\\) faster than Llama-3.2-3B (2.64x faster for the longest sequence length)." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.515, + 0.488, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.184, + 0.658, + 0.476, + 0.699 + ], + "angle": 0, + "content": "Figure 1: Inference latency when using prompt length 256 and decoding length 4096." + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.515, + 0.822, + 0.656 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.524, + 0.658, + 0.808, + 0.685 + ], + "angle": 0, + "content": "Figure 2: Inference latency when using batch size 128." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.713, + 0.825, + 0.8 + ], + "angle": 0, + "content": "It is well-known that LLM inference comprises a prefilling (compute-bound) and a decoding (memory-bound) stage. For math reasoning models, it is common to assume that decoding takes much longer than prefilling, since prefilling only uses a short MATH question, while decoding generates long answers. 
Under these settings, the process is memory-bound. Given that Mamba is highly memory-efficient and we only use a SSM state size of 16, these memory advantages translate into improved speed." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.814, + 0.348, + 0.831 + ], + "angle": 0, + "content": "4.3 Test-Time Scaling" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Given a fixed time budget, M1 can generate more sequences or longer sequences compared to a transformer model, which can hopefully boost its performance. We evaluate the effect of test-time compute scaling on model performance. We scale both the number of samples generated as well as the length of generated samples, to see if M1 benefits from additional compute along these axes. We aim to investigate whether the speed benefit from section 4.2 can translate into an accuracy gain." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.173, + 0.104, + 0.378, + 0.12 + ], + "angle": 0, + "content": "Scaling with majority vote." + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.151, + 0.486, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.151, + 0.78, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.297, + 0.825, + 0.34 + ], + "angle": 0, + "content": "Figure 3: Number of samples vs. AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Both graphs include pass@1 and majority voting accuracies for M1 and DeepSeek-R1-Distill-Qwen-1.5B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.825, + 0.44 + ], + "angle": 0, + "content": "The left side of Figure 3 shows the effect of scaling the number of generated samples (while fixing the maximum decoding length) on AIME25 accuracy. 
Both the baseline model and M1 see increasing accuracy as the number of samples increases, with M1 nearly matching the baseline performance for larger sample sizes. The efficient generation of M1 also means that generating large number of samples at test-time is faster than for the baseline transformer model." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.447, + 0.827, + 0.603 + ], + "angle": 0, + "content": "We quantify this efficiency in the right side of Figure 3, which compares the number of seconds spent generating samples against the resulting accuracy. To compute the time values on the x-axis, we find an optimal throughput value (in tokens per second) for each model by increasing batch sizes until throughput decreases. The optimal values were 7263 T/s for DeepSeek-R1-Distill-Qwen-1.5B, and 15169 T/s for M1. We then assume that each generated sample is maximum length (8K), and compute the seconds required for one sample from one model as 8K divided by the throughput. We then convert the left graph of Figure 3 into the right graph, by multiplying the number of samples for each datapoint by the seconds required per sample for each model. As an example, M1 requires roughly a half second (8K/15K) per sample, so the accuracy value for M1 at 32 samples on the left graph appears at approximately 16 seconds on the right graph." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.608, + 0.403, + 0.623 + ], + "angle": 0, + "content": "Scaling with longer sequences" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.629, + 0.827, + 0.756 + ], + "angle": 0, + "content": "Figure 4 shows the effect of scaling the maximum length of the generated answer, while fixing the number of generated samples to one. For both the baseline and M1, increasing the maximum sequence length leads to increased accuracy, as shown in the left graph in Figure 4. 
After converting from generation length to the seconds required to generate (done in the same way as Figure 3, but dividing the generation length by throughput), we can see the accuracy gain per time spent generating on the right side of Figure 4. In this case, M1 actually gets a higher accuracy for the same amount of time spent generating at 4 of the 5 evaluated sequence lengths, showing the benefits of efficient generation for test-time compute scaling." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.778, + 0.285, + 0.796 + ], + "angle": 0, + "content": "5 Analysis" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.812, + 0.625, + 0.828 + ], + "angle": 0, + "content": "Increasing Training Length in RL boosts model performance" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.904 + ], + "angle": 0, + "content": "With more efficient models, we can increase the length of sequences used in RL training, resulting in improved performance. Empirically, we see this in Figure 5, which shows an increase in accuracy on AIME25 as we scale up the length of sequences generated when training with GRPO. Training with sequences of maximum length 4096 results in accuracy below \\(10\\%\\), while allowing sequences up to length 24K boosts the accuracy up to \\(23\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.462, + 0.926 + ], + "angle": 0, + "content": "MATH Accuracy at each training stage" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.103, + 0.49, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.103, + 0.821, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.257, + 0.828, + 0.289 + ], + "angle": 0, + "content": "Figure 4: Generation length vs. 
AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Sampling for both models is done using a temperature of 0.8." + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.33, + 0.637, + 0.486 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.258, + 0.504, + 0.739, + 0.521 + ], + "angle": 0, + "content": "Figure 5: Pass@1 vs. maximum sequence length in GRPO training" + }, + { + "type": "table", + "bbox": [ + 0.252, + 0.522, + 0.747, + 0.607 + ], + "angle": 0, + "content": "
MATH500AIME24
Distill380
Distill + SFT(MATH)450
Distill + SFT(MATH) + SFT(Reason)7422
Distill + SFT(MATH) + SFT(Reason) + RL8228
" + }, + { + "type": "table_caption", + "bbox": [ + 0.228, + 0.616, + 0.768, + 0.632 + ], + "angle": 0, + "content": "Table 3: M1 Accuracy after each training stage on MATH500 and AIME24." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.681, + 0.828, + 0.766 + ], + "angle": 0, + "content": "To identify which components of our training pipeline have the greatest impact on performance, we also evaluate intermediate versions of the model on MATH500 (Hendrycks et al., 2021) and AIME24 (MAA, 2024). The results of these evaluations are presented in Table 3. Each step of the training pipeline provides a boost to performance, with particularly large gains from fine-tuning on solutions from reasoning models (\\(+29\\%\\) on MATH500 and \\(+17\\%\\) on AIME24)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.771, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Direct Distillation from Reasoning Models We also attempted to distill from Deepseek-R1-Qwen-1.5B instead of Llama-3.2-3B. In this case, we did not SFT on OpenMathInstruct, and instead only SFT on the 10B reasoning data that we collected after distillation. We found that the distilled model's performance was poor (38% and 3.3% pass@1 accuracy on MATH500 and AIME24, respectively). Our hypothesis for why this occurs is that 10B tokens is insufficient to effectively transfer reasoning skills from the transformer to Mamba. Although curating a high-quality reasoning dataset demands significant time and effort, we begin by leveraging the standard MATH distillation dataset from OpenMathInstruct (Toshniwal et al., 2024) to first distill a strong MATH model. We then transform this MATH model into a reasoning model via SFT on the dedicated reasoning dataset. This approach achieves strong performance with a much smaller number of reasoning tokens." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.31, + 0.119 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.134, + 0.828, + 0.275 + ], + "angle": 0, + "content": "In this paper, we introduced M1, a hybrid reasoning model built on the Mamba architecture, designed to address the scalability challenges of the Transformer models. We demonstrated effective techniques for distillation and finetuning to develop M1, which achieves mathematical reasoning performance comparable to state-of-the-art reasoning models of similar size. Notably, M1 delivers over 3x faster inference than similar-sized Transformer models, even when using the heavily optimized vLLM inference engine, particularly at large batch sizes. This improved efficiency can make the resource-intensive inference-time strategies, such as self-consistency, more practical. Our findings establish M1 as a strong alternative to Transformer-based architectures, paving the way for more efficient and high-performing reasoning models." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.295, + 0.276, + 0.311 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.318, + 0.826, + 0.348 + ], + "angle": 0, + "content": "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.357, + 0.827, + 0.415 + ], + "angle": 0, + "content": "Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebron, and Sumit Sanghai. Gqa: Training generalized multi-query transformer models from multi-head checkpoints. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 4895-4901, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.424, + 0.829, + 0.468 + ], + "angle": 0, + "content": "Maximilian Beck, Korbinian Poppel, Markus Spanring, Andreas Auer, Oleksandra Prudnikova, Michael Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. xlstm: Extended long short-term memory, 2024. URL https://arxiv.org/abs/2405.04517." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.475, + 0.826, + 0.518 + ], + "angle": 0, + "content": "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.527, + 0.826, + 0.57 + ], + "angle": 0, + "content": "Aviv Bick, Kevin Y. Li, Eric P. Xing, J. Zico Kolter, and Albert Gu. Transformers to ssms: Distilling quadratic knowledge to subquadratic models, 2024. URL https://arxiv.org/abs/2408.10189." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.58, + 0.826, + 0.624 + ], + "angle": 0, + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.631, + 0.827, + 0.703 + ], + "angle": 0, + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, and et. al. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.712, + 0.827, + 0.768 + ], + "angle": 0, + "content": "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.777, + 0.826, + 0.834 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.844, + 0.827, + 0.887 + ], + "angle": 0, + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.895, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Tri Dao and Albert Gu. Transformers are ssms: Generalized models and efficient algorithms through structured state space duality, 2024. URL https://arxiv.org/abs/2405.21060." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.318, + 0.829, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.829, + 0.175 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, and et. al. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.182, + 0.829, + 0.226 + ], + "angle": 0, + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.233, + 0.826, + 0.276 + ], + "angle": 0, + "content": "Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens, 2024. URL https://arxiv.org/abs/2310.02226." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.284, + 0.829, + 0.356 + ], + "angle": 0, + "content": "Aaron Grattaftiori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, and et. al. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.363, + 0.829, + 0.393 + ], + "angle": 0, + "content": "Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces, 2024. URL https://arxiv.org/abs/2312.00752." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.4, + 0.826, + 0.43 + ], + "angle": 0, + "content": "Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces, 2022. URL https://arxiv.org/abs/2111.00396." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.437, + 0.826, + 0.468 + ], + "angle": 0, + "content": "Yuxian Gu, Li Dong, Furu Wei, and Minlie Huang. 
Minillm: Knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2306.08543." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.474, + 0.829, + 0.517 + ], + "angle": 0, + "content": "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.525, + 0.829, + 0.582 + ], + "angle": 0, + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.59, + 0.829, + 0.633 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.641, + 0.829, + 0.671 + ], + "angle": 0, + "content": "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015. URL https://arxiv.org/abs/1503.02531." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.678, + 0.829, + 0.721 + ], + "angle": 0, + "content": "Jungo Kasai, Hao Peng, Yizhe Zhang, Dani Yogatama, Gabriel Ilharco, Nikolaos Pappas, Yi Mao, Weizhu Chen, and Noah A. Smith. Finetuning pretrained transformers into mns, 2021. URL https://arxiv.org/abs/2103.13076." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.728, + 0.829, + 0.772 + ], + "angle": 0, + "content": "Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. 
Transformers are rnns: Fast autoregressive transformers with linear attention, 2020. URL https:// arxiv.org/abs/2006.16236." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.779, + 0.829, + 0.838 + ], + "angle": 0, + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.845, + 0.829, + 0.888 + ], + "angle": 0, + "content": "Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks, 2025. URL https://arxiv.org/abs/2501.10069." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.829, + 0.176 + ], + "angle": 0, + "content": "Opher Lieber, Barak Lenz, Hofit Bata, Gal Cohen, Jhonathan Osin, Itay Dalmedigos, Erez Safahi, Shaked Meirom, Yonatan Belinkov, Shai Shalev-Shwartz, Omri Abend, Raz Alon, Tomer Asida, Amir Bergman, Roman Glozman, Michael Gokhman, Avashalom Manevich, Nir Ratner, Noam Rozen, Erez Shwartz, Mor Zusman, and Yoav Shoham. Jamba: A hybrid transformer-mamba language model, 2024. URL https://arxiv.org/abs/2403.19887." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.182, + 0.829, + 0.224 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.232, + 0.829, + 0.288 + ], + "angle": 0, + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision, 2024. URL https://arxiv.org/abs/2406.06592." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.296, + 0.999, + 0.382 + ], + "angle": 0, + "content": "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaler: Surpassing o1-preview with a 1.5b model by scaling rl. https://pretty-radio-b75.notion.site/ DeepScaleR-Surpassing-01-Preview-with-a-1-5B-Model-by-Scaling-RL-19681902c1468005bed8ca303013a4e2, 2025. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.388, + 0.829, + 0.431 + ], + "angle": 0, + "content": "MAA. American invitational mathematics examination 2023, 2023. URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.438, + 0.829, + 0.481 + ], + "angle": 0, + "content": "MAA. American invitationalal mathematics examination 2024, 2024. URL https://artofproblemsolving.com/wiki/index.php/American_Invitationalal_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnj0ZqM4qIqY3yGB3sBaqOaxwf_Xt." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.488, + 0.829, + 0.531 + ], + "angle": 0, + "content": "MAA. American invitational mathematics examination 2025, 2025. URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmB0oqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.538, + 0.829, + 0.581 + ], + "angle": 0, + "content": "Jean Mercat, Igor Vasiljevic, Sedrick Keh, Kushal Arora, Achal Dave, Adrien Gaidon, and Thomas Kollar. Linearizing large language models, 2024. URL https://arxiv.org/abs/2405.06640." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.588, + 0.829, + 0.632 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candes, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.639, + 0.829, + 0.682 + ], + "angle": 0, + "content": "Daniele Paliotta, Junxiong Wang, Matteo Pagliardini, Kevin Y Li, Aviv Bick, J Zico Kolter, Albert Gu, François Fleuret, and Tri Dao. Thinking slow, fast: Scaling inference compute with distilled reasoners. arXiv preprint arXiv:2502.20339, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.688, + 0.829, + 0.759 + ], + "angle": 0, + "content": "Bo Peng, Eric Alcaide, Quentin Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Chung, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Jiaju Lin, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, and et. al. Rwkv: Reinventing rnns for the transformer era, 2023. URL https://arxiv.org/abs/2305.13048." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.767, + 0.829, + 0.809 + ], + "angle": 0, + "content": "Jacob Pfau, William Merrill, and Samuel R. Bowman. 
Let's think dot by dot: Hidden computation in transformer language models, 2024. URL https://arxiv.org/abs/2404.15758." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.817, + 0.829, + 0.859 + ], + "angle": 0, + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.867, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Qwen,:, An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, and et. al. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.999, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Tokiniaina Raharison Ralambomihanta, Shahrad Mohammadzadeh, Mohammad Sami Nur Islam, Wassim Jabbour, and Laurence Liang. Scavenging hyena: Distilling transformers into long convolution models, 2024. URL https://arxiv.org/abs/2401.17574." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.157, + 0.827, + 0.201 + ], + "angle": 0, + "content": "Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. Samba: Simple hybrid state space models for efficient unlimited context language modeling, 2024. URL https://arxiv.org/abs/2406.07522." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.21, + 0.825, + 0.242 + ], + "angle": 0, + "content": "Matthew Renze and Erhan Guven. The effect of sampling temperature on problem solving in large language models, 2024. 
URL https://arxiv.org/abs/2402.05201." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.25, + 0.826, + 0.295 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.304, + 0.826, + 0.348 + ], + "angle": 0, + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.357, + 0.826, + 0.401 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https:// arxiv.org/abs/2408.03314." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.411, + 0.826, + 0.455 + ], + "angle": 0, + "content": "Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data, 2024. URL https://arxiv.org/abs/2410.01560." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.465, + 0.826, + 0.51 + ], + "angle": 0, + "content": "Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback, 2022. URL https://arxiv.org/abs/2211.14275." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.518, + 0.826, + 0.562 + ], + "angle": 0, + "content": "Junxiong Wang, Daniele Paliotta, Avner May, Alexander Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. 
Advances in Neural Information Processing Systems, 37:62432-62457, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.572, + 0.826, + 0.615 + ], + "angle": 0, + "content": "Junxiong Wang, Daniele Paliotta, Avner May, Alexander M. Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. arXiv preprint arXiv:2408.15237, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.625, + 0.826, + 0.67 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.678, + 0.826, + 0.737 + ], + "angle": 0, + "content": "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models, 2022. URL https://arxiv.org/abs/2206.07682." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.746, + 0.826, + 0.791 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models, 2023. URL https://arxiv.org/abs/2201.11903." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.8, + 0.826, + 0.844 + ], + "angle": 0, + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models, 2024. URL https://arxiv.org/abs/2408.00724." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.853, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.153, + 0.826, + 0.196 + ], + "angle": 0, + "content": "Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. Gated linear attention transformers with hardware-efficient training, 2024. URL https://arxiv.org/abs/2312.06635." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.203, + 0.826, + 0.234 + ], + "angle": 0, + "content": "Wang Yang, Hongye Jin, Jingfeng Yang, Vipin Chaudhary, and Xiaotian Han. Thinking preference optimization, 2025. URL https://arxiv.org/abs/2502.13173." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.24, + 0.826, + 0.283 + ], + "angle": 0, + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models, 2023. URL https://arxiv.org/abs/2305.10601." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.29, + 0.826, + 0.334 + ], + "angle": 0, + "content": "Jingyang Yuan, Huazuo Gao, Damai Dai, Junyu Luo, Liang Zhao, Zhengyan Zhang, Zhenda Xie, YX Wei, Lean Wang, Zhiping Xiao, et al. Native sparse attention: Hardware-aligned and natively trainable sparse attention. arXiv preprint arXiv:2502.11089, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.34, + 0.826, + 0.384 + ], + "angle": 0, + "content": "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.39, + 0.826, + 0.433 + ], + "angle": 0, + "content": "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Restmcts*: Llm self-training via process reward guided tree search, 2024a. URL https://arxiv.org/abs/2406.03816." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.44, + 0.826, + 0.496 + ], + "angle": 0, + "content": "Michael Zhang, Simran Arora, Rahul Chalamala, Benjamin Frederick Spector, Alan Wu, Krithik Ramesh, Aaryan Singhal, and Christopher Re. Lolcats: On low-rank linearizing of large language models. In The Thirteenth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.504, + 0.826, + 0.547 + ], + "angle": 0, + "content": "Michael Zhang, Kush Bhatia, Hermann Kumbong, and Christopher Ré. The hedgehog & the porcupine: Expressive linear attentions with softmax mimicry, 2024b. URL https://arxiv.org/abs/2402.04347." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.554, + 0.826, + 0.597 + ], + "angle": 0, + "content": "Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B. Tenenbaum, and Chuang Gan. Planning with large language models for code generation, 2023. 
URL https://arxiv.org/abs/2303.05510." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.604, + 0.826, + 0.647 + ], + "angle": 0, + "content": "Xuan Zhang, Fengzhuo Zhang, Cunxiao Du, Chao Du, Tianyu Pang, Wei Gao, and Min Lin. Lighttransfer: Your long-context llm is secretly a hybrid model with effortless adaptation. arXiv preprint arXiv:2410.13846, 2024c." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.826, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.671, + 0.47, + 0.687 + ], + "angle": 0, + "content": "A Limitations and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.702, + 0.825, + 0.79 + ], + "angle": 0, + "content": "Speedup. Our current hybrid model is only \\(3 \\times\\) faster than a Transformer of the same size when serving inference with vLLM. Recently, NVIDIA introduced a new hybrid Mamba kernel7, which could further boost the speed of hybrid models. Additionally, our attention implementation in hybrid models does not yet leverage the optimizations available in vLLM. Integrating M1 into vLLM could further boost performance by taking advantage of these attention speedups." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.803, + 0.825, + 0.902 + ], + "angle": 0, + "content": "Why do we not distill Qwen2.5 1.5B MATH model. We considered using the Qwen2.5 1.5B MATH Instruct model as the distillation target in the first stage. However, we found that the cross entropy loss of the Qwen 1.5B MATH model on the OpenMATH Instruct dataset (Toshniwal et al., 2024) exceeded 1.8, which is much higher than that of the Llama models (0.5). This suggests that, to mimic the Qwen2.5 model, we need a dataset generated from a large Qwen2.5 series model rather than this one generated from the Llama models. Dataset curation from Qwen Math models goes beyond the scope of this work." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.826, + 0.924 + ], + "angle": 0, + "content": "7https://github.com/NVIDIA/Megatron-LM/commit/b957578e76a921209ef873cbbd389114a4042542" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.327 + ], + "angle": 0, + "content": "Improvement on RL training speed. Recently, DeepSeek R1 (DeepSeek-AI et al., 2025) showed that reinforcement learning (RL) is a key component in improving model reasoning performance during post-training. Since then, recent research has predominantly relied on reinforcement learning (RL) as a training paradigm for reasoning models. However, training with RL requires the efficient generation of long sequences. For example, in VeRL (Sheng et al., 2024), the typical training batch size ranges from a few thousand to several thousand. DeepscaleR (Luo et al., 2025) also shows a significant accuracy boost when training RL with longer sequences, as it tends to enhance model performance by providing more steps for thorough reasoning. However, this shift towards reinforcement learning has resulted in the generation process becoming a significant bottleneck in reasoning model training, taking more than three times as long as the actor's weight update (forward + backward) according to the time profiling done for DeepscaleR (Luo et al., 2025). This need for efficient generation in RL presents a significant challenge for transformer models, namely due to the heavy computational burden imposed by large key-value caches during generation, especially for large batch sizes. Given their generation speed advantages, linear RNN models may be better suited for scaling RL training." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_origin.pdf b/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2b92f1c26785a53947169e890d34cbd597d5280c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d423929d7fe342e106efdf8c3e518f29efddb4a193edd46696189eac9a3056cf +size 387898 diff --git a/data/2025/2504_10xxx/2504.10449/full.md b/data/2025/2504_10xxx/2504.10449/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1a2a862b455f356781519a9f870ffe99fd94357f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/full.md @@ -0,0 +1,281 @@ +# M1: Towards Scalable Test-Time Compute with Mamba Reasoning Models + +Junxiong Wang $^{1}$ , Wen-Ding Li $^{2}$ , Daniele Paliotta $^{3*}$ + +Daniel Ritter2, Alexander M. Rush2, Tri Dao1,4 + +$^{1}$ TogetherAI, $^{2}$ Cornell University, $^{3}$ University of Geneva, $^{4}$ Princeton University + +# Abstract + +Effective reasoning is crucial to solving complex mathematical problems. Recent large language models (LLMs) have boosted performance by scaling test-time computation through long chain-of-thought reasoning. However, transformer-based models are inherently limited in extending context length due to their quadratic computational complexity and linear memory requirements. In this paper, we introduce a novel hybrid linear RNN reasoning model, M1, built on the Mamba architecture, which allows memory-efficient inference. Our approach leverages a distillation process from existing reasoning models and is further enhanced through RL training. 
Experimental results on the AIME and MATH benchmarks show that M1 not only outperforms previous linear RNN models but also matches the performance of state-of-the-art Deepseek R1 distilled reasoning models at a similar scale. We also compare our generation speed with a highly performant general purpose inference engine, vLLM, and observe more than a 3x speedup compared to a same size transformer. With throughput speedup, we are able to achieve higher accuracy compared to DeepSeek R1 distilled transformer reasoning models under a fixed generation time budget using self-consistency voting. Overall, we introduce a hybrid Mamba reasoning model and provide a more effective approach to scaling test-time generation using self-consistency or long chain of thought reasoning. Code and pre-trained checkpoints are open-sourced at github.com/jxiw/M1. + +# 1 Introduction + +Robust and effective reasoning is the cornerstone for successfully performing tasks in domains such as mathematics and programming. Additionally, performance on reasoning tasks can often be boosted by generating longer sequences and/or generating many sequences in parallel (Snell et al., 2024). However, current transformer-based large language models (LLMs) face significant challenges when tasked with processing long sequences with large batch sizes. These models are constrained by a quadratic increase in computational complexity as the sequence length grows, coupled with a linear escalation in memory requirements. This combination makes it increasingly difficult for models to scale efficiently when handling large inputs. + +Although linear hybrid RNN models (Gu & Dao, 2024; Dao & Gu, 2024; Beck et al., 2024; Yang et al., 2024; Peng et al., 2023) have shown great potential as an alternative to transformer-based on general language models, their effectiveness on reasoning tasks remains unclear. 
Since modern reasoning models typically generate long chains of thought for challenging math questions, it is uncertain whether the performance of hybrid linear RNNs diminishes in such scenarios. + +In this paper, we propose M1 and show that it is possible to derive strong hybrid reasoning models by efficiently transferring reasoning capabilities from a large transformer model. Our training process involves distilling knowledge, incorporating math and reasoning abilities through supervised fine-tuning (SFT), and finally, boosting performance using reinforcement learning (RL) training. In total, the training process requires fewer than 50 billion tokens. In contrast, DeepSeek-R1-Distill-Qwen-1.5B is finetuned from Qwen2.5 MATH 1.5B which is trained using over 1 trillion MATH tokens on top of Qwen2.5. + +We demonstrate that our hybrid models achieve a 3x speedup compared to transformers of the same size when served using a highly performant general purpose inference engine, vLLM, at large batch sizes. This gain is mainly due to large batches and long sequences, decoding being generally memory-bound. Lower memory usage of hybrid models can transform this advantage into a speed gain. The decoding speedup is approximately linear with the volume of model's memory access (Yuan et al., 2025). + +Notably, this speedup can be converted to a gain in reasoning accuracy. Studies (Snell et al., 2024; Li, 2025; Chen et al., 2025) show that techniques such as self-consistency (Wang et al., 2023) and verification (Cobbe et al., 2021) at test time can significantly boost model reasoning performance. Under these conditions, a high-throughput model can further enhance its performance by generating more samples. + +The paper is organized as follows. Section 2 covers related work, Section 3 introduces our pipeline for distilling a hybrid reasoning model, and Section 4.1 presents our results evaluating M1 on math benchmarks. 
Sections 4.2 and 4.3 evaluate the performance gains of M1 in terms of both inference speed and scaling test-time compute. Section 5 provides some additional analysis of the impact of different generation lengths when training on RL, and of the impact of the different steps of the distillation pipeline we propose on performance. + +Overall, we show that M1 performs on par with DeepSeek-R1-Distill-Qwen-1.5B, achieving scores of 82 on MATH500 (Hendrycks et al., 2021), 23 on AIME25 (MAA, 2025), 28 on AIME24 (MAA, 2024), and 47 on OlympiadBench (He et al., 2024), while offering $3 \times$ faster inference throughput, even compared to the highly optimized vLLM (Kwon et al., 2023) implementation for Transformer models. + +# 2 Related Work + +# 2.1 Reasoning models + +Recent models like Deepseek-R1 (DeepSeek-AI et al., 2025) have shown the potential of RL training to improve performance on verifiable reasoning tasks, such as math problem solving and programming. Additional work has proposed methods for inducing this reasoning behavior via supervised fine-tuning, either on curated data (Muennighoff et al., 2025) or on generated pairs of traces (Yang et al., 2025). Other approaches also combine search procedures such as MCTS with language models (Qi et al., 2024) or alter standard RL training schemes to control the length of generated outputs (Aggarwal & Welleck, 2025). After training, these models solve complex tasks by generating long chains of thought, which often include subtasks of the overall problem, multiple attempted solutions, and backtracking over prior attempts (Gandhi et al., 2025). Since the performance of these models, both during training and inference, relies on generating lengthy chains of thought, more efficient architectures can enable larger scale training and less costly generation. + +# 2.2 Enhancing Reasoning via Scaled Inference Compute + +Increasing the computational budget during inference has become a promising approach to boost LLM performance. 
Methods like Chain of Thought (CoT) and its derivatives have achieved notable gains on reasoning benchmarks by breaking down complex tasks into intermediate steps (Wei et al., 2023; Yao et al., 2023). Although decomposing tasks improves reasoning, it also lengthens generation sequences and raises computational costs. Some recent studies even indicate that this extra computation might itself enhance model capabilities (Pfau et al., 2024). In addition, adaptive compute allocation during inference + +has been explored. For example, Goyal et al. (2024) incorporated pause tokens into the vocabulary, allowing models to distribute compute more efficiently and improve both reasoning and overall task performance. LightTransfer (Zhang et al., 2024c) introduces a lightweight method that detects lazy layers and replaces their full attention with streaming attention—slashing KV-cache overhead and boosting throughput. + +Another strategy involves generating several outputs and selecting the best one. Researchers have developed various sampling algorithms to diversify and enhance the quality of generated responses, thereby increasing the chances of retrieving the most accurate answer (Wang et al., 2023; Renze & Guven, 2024; Zhang et al., 2023). Moreover, outcome and process reward models (ORMs and PRMs) have been introduced to evaluate responses and steer intermediate generation steps (Lightman et al., 2023; Zhang et al., 2024a; Luo et al., 2024; Uesato et al., 2022). + +Recent investigations reveal that, under fixed compute budgets, smaller LLMs augmented with inference-time compute techniques (such as majority voting or PRM-guided search) can outperform larger models (Snell et al., 2024; Wu et al., 2024; Beeching et al., 2024). However, these results are mainly confined to Transformer-based architectures, leaving open questions about whether similar scaling laws hold for subquadratic architectures, which offer faster inference but might compromise on expressiveness. 
+ +# 2.3 Alternatives to Transformer Architectures + +Even though most reasoning models are based on the Transformer architecture (Grattaftori et al., 2024; Qwen et al., 2025), alternatives have been proposed to alleviate their high computational cost. Models built on top of RNNs (Beck et al., 2024; Peng et al., 2023), state space models (SSMs) (Gu et al., 2022; Gu & Dao, 2024), and linear attention mechanisms (Katharopoulos et al., 2020; Yang et al., 2024) demonstrate superior inference and memory efficiency, particularly for long-context tasks and large-batch generation. The Mamba series (Mamba-1 and Mamba-2) notably introduced selective state spaces to enable linear-time sequence modeling with strong performance (Gu & Dao, 2024; Dao & Gu, 2024). In addition, hybrid architectures that combine a few self-attention layers with subquadratic layers (e.g., Mamba) have emerged, showing advantages over both pure Transformer and pure subquadratic designs (Lieber et al., 2024; Ren et al., 2024). Such architectures are particularly suited to meet the high compute demands of inference-time scaling, and our work investigates their scaling properties. + +# 2.4 Knowledge Distillation Strategies + +Knowledge distillation has proven to be an effective means of transferring capabilities from large teacher models to smaller, more efficient student models (Hinton et al., 2015). In LLMs, this process compresses a larger pre-trained model into a more compact version while preserving core knowledge and functionality (Gu et al., 2024; Xu et al., 2024). Although larger models tend to exhibit superior reasoning abilities due to scaling properties (Xu et al., 2025; Wei et al., 2022), distillation techniques have enabled smaller models to achieve competitive reasoning performance (DeepSeek-AI et al., 2025; Labs, 2025). While most efforts have focused on intra-architecture distillation (e.g., Transformer-to-Transformer), recent studies have ventured into cross-architecture distillation. 
For instance, pretrained Transformers have been distilled into architectures such as RNNs (Kasai et al., 2021; Mercat et al., 2024), linear attention models (Zhang et al., 2024b; Zhang et al.), convolutional networks (Ralambomihanta et al., 2024), and SSMs (Bick et al., 2024; Wang et al., 2024b; Paliotta et al., 2025). Whether the robust reasoning abilities of Deepseek R1 (DeepSeek-AI et al., 2025) distilled models can be effectively transferred across different architectures remains an open question. + +# 3 The M1 Reasoning Model + +In this section, we present a multi-stage process for building our hybrid linear RNN reasoning model, M1. The approach has three stages: distillation, SFT, and RL. We begin by + +Algorithm 1 Initializing MAMBAINLLAMA +1: Shapes: B - Batch, L - Length, D - embed size, $N = D$ /Attention_heads, $N'$ - expand +2: Input: $o_t$ : (B, D) +3: Output: output: (B, D) +4: New Params: MLP, A +5: for each head $\mathbf{W}^K, \mathbf{W}^Q, \mathbf{W}^V, \mathbf{W}^o : (N, D)$ after expanding to same dimension do +6: Head Parameter: A : (N, $N'$ ) +7: for all positions t: +8: $x_t : (B, N) \leftarrow \mathbf{W}^V o_t$ +9: $\mathbf{B}_t : (B, N) \leftarrow \mathbf{W}^K o_t$ +10: $\mathbf{C}_t : (B, N) \leftarrow \mathbf{W}^Q o_t$ +11: $\Delta_t : (B, N') \leftarrow \mathrm{MLP}(x_t)$ +12: $\overline{\mathbf{A}}_{1:T}, \overline{\mathbf{B}}_{1:T}, \overline{\mathbf{C}}_{1:T} : (B, N, N') \leftarrow \mathrm{DISC}(\mathbf{A}, \mathbf{B}, \mathbf{C}, \Delta)$ +13: $y \gets \mathrm{LINEARRNN}(\overline{\mathbf{A}}, \overline{\mathbf{B}}, \overline{\mathbf{C}}, x)$ +14: output $\leftarrow$ output + $\mathbf{W}^{OT} y$ +15: end for +16: return output + +distilling a Transformer model into a Mamba architecture, adapting the method of Wang et al. (2024a), which initializes the hybrid model's weights from a transformer model. 
We then perform math-specific supervised fine-tuning (SFT) on general mathematical datasets to enhance the model's mathematical performance, first without yet incorporating datasets generated by reasoning-focused models, and then with reasoning data leveraging multiple large-scale datasets generated by the R1 model series. Finally, we apply R1's GRPO method to further enhance the model's math reasoning capability. + +Stage 1: Distillation. The first step in building our M1 model is distilling a pretrained transformer model into a Mamba model. We adapt the distillation approach introduced by Wang et al. (2024a). + +The MAMBAINLLAMA framework (Wang et al., 2024a) proposes distilling hybrid Transformer-Mamba models by reusing weights from attention layers. In this distillation procedure, outlined in Algorithm 1, linear projections for $\mathbf{Q},\mathbf{K},\mathbf{V}$ , and $\mathbf{O}$ are initialized from the corresponding projections for $\mathbf{C},\mathbf{B},\mathbf{X}$ , and $\mathbf{O}$ , respectively. The newly introduced parameters in the Mamba layers are the sampling rate $\Delta$ and the dynamic parameter $\mathbf{A}$ , which control the resulting Mamba module via a discretization function. Specifically, the sampling rate $\Delta \in \mathbb{R}^{N'}$ discretizes $\mathbf{B}_t,\mathbf{C}_t \in \mathbb{R}^{N\times 1}$ , yielding $\overline{\mathbf{B}}_t,\overline{\mathbf{C}}_t \in \mathbb{R}^{N'\times N\times 1}$ , as detailed in Algorithm 1. Different from Wang et al. (2024a), we introduce two additional linear layers to project from head.dim $*$ kv_head to head.dim $*$ n_head. This is because GQA (Ainslie et al., 2023) is used in the transformer model to reduce the KV cache. As Mamba does not utilize a KV cache, this expansion can increase the expressiveness of $\mathbf{B}$ and $\mathbf{X}$ . + +We directly reuse the MLP layers; however, unlike the original approach, we replace the attention layers with Mamba layers in a single step. 
Subsequently, we fine-tune the entire model to expedite the training process. The distillation step involves minimizing the token-level KL divergence, aligning the entire probability distribution of the student model, $p(\cdot ;\theta)$ , with the teacher model, $p(\cdot ;\theta_T)$ , for every candidate token at position $t$ . We use the reverse KL divergence, $D_{\mathrm{KL}}(p(\cdot ;\theta)\parallel p(\cdot ;\theta_T))$ , as our loss function rather than the forward KL divergence. We choose the reverse KL divergence due to its mode-seeking properties, which results in improved empirical performance. + +We reimplement the distillation and SFT framework using the Axolotl ${}^{1}$ training framework. We apply the model chat template,mask the user prompt,and compute the loss only over the tokens generated in the assistant's output. To speed up training,we use data packing to merge different sequences into a single one until we reach the maximum sequence length which is set to 8192 . We find that data packing achieves significantly better results compared + +to the non-packing version in distillation for the same training steps. We use the AdamW optimizer with learning rate $1 \times 10^{-5}$ with cosine decay, $\beta = (0.9, 0.95)$ and a weight decay of 0.1. + +Stage 2: SFT Following the distillation procedure, we finetune the model on a large set of math problems, OpenMathInstruct-2 (Toshniwal et al., 2024). As in the distillation stage, we apply the chat template to the prompts, mask the user prompt, and compute the loss only over the tokens generated in the assistant's output. We train for two epochs using the same optimizer as distillation. + +After the initial fine-tuning stage, we finetune on an additional set of math problems and solutions generated by reasoning models. 
We collect a mixed reasoning dataset, including OpenR1-Math-220k $^{2}$ , OpenThoughts-114k-math $^{3}$ , and ServiceNow-AI-R1-Distill $^{4}$ , Magpie-Reasoning-250K $^{5}$ for a total of 10B reasoning tokens. The first two datasets were generated from R1, while the last two was generated from the R1 distilled Qwen 32B model and R1 distilled Llama 70B model. We extended the training length to 24,576 because we found that it covers $99\%$ of the data items. We train the model for five epochs using the same optimizer as before but changing the peak learning rate to $6 \times 10^{-6}$ . + +Stage 3: Reasoning RL. To further enhance performance, we integrate Mamba with a RL pipeline for further training.6 We use GRPO as the loss function. Differing from (Shao et al., 2024), we remove the KL penalty term as empirically we find it destabilizes training. Additionally, we include an entropy bonus to encourage a more diverse policy. The resulting formula is, + +$$ +L _ {\mathrm {G R P O}} (\theta) = \mathbb {E} _ {\tau \sim \pi_ {\theta_ {\mathrm {o l d}}}} \left[ \frac {\pi_ {\theta} (a | s)}{\pi_ {\theta_ {\mathrm {o l d}}} (a | s)} \hat {A} (s, a) \right] + \eta H (\pi_ {\theta}) \tag {1} +$$ + +where $\hat{A}(s, a)$ is the estimate of the advantage from multiple rollouts. We use a batch size of 128 and a PPO batch size of 64, which also determines the number of PPO iterations, $\mu = 2$ . We set the number of generations for each sequence to 8 and the maximum generation length to 32k. For optimization, we use the Adam optimizer with a learning rate of $1 \times 10^{-6}$ . We train for 50 steps, and pick the best checkpoint with the highest critic reward. We append the simple prompt "Let's think step by step and output the final answer within $\backslash$ boxed{}") to the end of each question in both training and evaluation. + +# 4 Experiments + +Model. We adopt the Llama3.2-3B-Instruct models as distillation target models. 
For Mamba layers, we set the SSM state size to 16. Consequently, the number of SSM groups after expansion is $3072 / 16 = 192$ for the 3B model. We use 6 interleaved attention layers among 28 total layers. + +Evaluation Dataset. Following common practice in evaluating reasoning models, we use a similar set of math benchmarks, including competition-level problems: MATH500 (Hendrycks et al., 2021), AIME25 (MAA, 2025), AIME24 (MAA, 2024), AMC23 (MAA, 2023), and OlympiadBench (He et al., 2024). + +$^{2}$ https://huggingface.co/datasets/open-r1/OpenR1-Math-220k +3https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k +4https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT +5https://huggingface.co/datasets/Magpie-Align/Magpie-Reasoning-V2-250K-CoT-Deepseek-R1-Llama-70B +6We add it into the popular VeRL (Sheng et al., 2024) framework. In doing so, we addressed and resolved the CUDA graph incompatibility issues that previously arose during training with PyTorch's FSDP module. As a result, the updated framework now efficiently supports Mamba generation with CUDA graph enabled, making it $5 \times$ faster than with CUDA Graph disabled + +Evaluation Metrics. Our model's performance is assessed using two key metrics: coverage and accuracy. In fields such as coding and formal proofs, where answers can be automatically verified, coverage translates directly to enhanced performance and is widely utilized (Chen et al., 2021; Brown et al., 2024). Coverage is often measured using the pass@k metric, with $k$ indicating the number of samples per problem (Chen et al., 2021; Brown et al., 2024). This metric estimates the likelihood that at least one correct solution exists among the $k$ samples. To minimize variance when calculating coverage, we employ the unbiased estimation formula from Chen et al. (2021). Specifically, we generate $N \geq k$ total samples per task. 
The probability that a correct solution exists among a pool of $k$ generated samples can then be determined given the total number of correct solutions $C_i$ for each task. + +$$ +\text {p a s s} @ \mathrm {k} = \frac {1}{\# \text {o f p r o b l e m s}} \sum_ {i = 1} ^ {\# \text {o f p r o b l e m s}} \left(1 - \frac {\binom {N - C _ {i}} {k}}{\binom {N} {k}}\right) +$$ + +We implement this formula using a numerically stable approach as recommended by Chen et al. (2021). + +When using additional compute, we employ multiple aggregation strategies. The most straightforward method is majority voting, also known as self-consistency decoding (Wang et al., 2023), which takes the majority response among $k$ samples as the predicted answer, and uses that to compute the accuracy. + +# 4.1 Reasoning Evaluation + +
ModelAIME25AIME24MATH500AMC23OlympiadBench
Qwen2.5-Math-7B-Instruct-13.379.850.640.7
rStar-Math-7B (Guan et al., 2025)-26.778.447.547.1
Eurus-2-7B-PRIME (Cui et al., 2025)-26.779.257.842.1
Qwen2.5-7B-SimpleRL (Zeng et al., 2025)-26.782.462.543.3
DeepSeek-R1-Qwen-1.5B23.028.882.862.943.3
M1-3B23.528.982.162.847.3
+ +Table 1: Evaluation results for M1-3B, DeepSeek-R1-Distill-Qwen-1.5B and other MATH models on MATH benchmarks + +
ModelAIME25AIME24MATH500AMC23OlympiadBench
Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32
DeepSeek-R1-Qwen-1.5B23.035.028.849.282.891.062.954.243.380.3
M1-3B23.534.629.050.582.191.862.855.047.380.1
+ +Table 2: Maj@32 results comparing M1-3B with DeepSeek-R1-Distill-Qwen-1.5B. + +We evaluate our models using a temperature setting of 0.7 and a sequence length of $32\mathrm{k}$ with evaluation tools in VeRL. We use $32\mathrm{k}$ because it has become the standard for evaluating performance on reasoning models (DeepSeek-AI et al., 2025; Luo et al., 2025). We report the pass@1 metric averaged over 64 runs; for majority voting, we repeat the metric calculation 100 times. + +We report the accuracy of M1-3B and DeepSeek-R1-Distill-Qwen-1.5B in Table 1 and 2. We use the baseline DeepSeek-R1-Distill-Qwen-1.5B since a 3B R1 reasoning model is still not available. Although M1-3B has more parameters than DeepSeek-R1-Distill-Qwen-1.5B, its speed is still comparable even with shorter contexts, so we believe this is a fair comparison. Our model's performance is competitive with state-of-the-art open reasoning models in the same model size range and outperforms larger nonreasoning math transformer models. Our model performs slightly worse on AIME24 compared to the DeepSeek-R1-Distill-Qwen-1.5B model. Notably, DeepSeek-R1-Distill-Qwen-1.5B is built on top of the Qwen2.5 MATH models, which were finetuned with over 1T MATH tokens on top of the Qwen2.5 models, significantly more training data than what M1-3B used in total. + +# 4.2 Speed Evaluation + +We benchmark inference time with our model against a transformer model (Llama-3.2.-3B (Grattafori et al., 2024)) of the same size. We use vLLM (version 0.6.3), which is the version used in VeRL for efficient rollouts. We also compare against DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI et al., 2025), a reasoning transformer model that is half the size of M1. This model has the same number of layers as the 3B parameter transformer, but the hidden dimension is half the size. + +According to Luo et al. (2025), the average generation length of reasoning models on MATH questions is $4\mathrm{k}$ to $5\mathrm{k}$ . 
We therefore fix a decoding length of 4096 (and prompt length of 256) and benchmark our model across a range of batch sizes. We vary the batch size from 8 to 512, measuring the inference latency across different models. + +We perform our benchmarking on a single NVIDIA H100 GPU with greedy decoding. To ensure that every model generates up to the set maximum number of tokens, we use ignoreEOS=True. Before recording results, we warm up the system with two runs. The final performance metrics are then averaged over three subsequent runs. The inference speeds of the models across batch sizes are shown in Figure 1. M1 achieves a $3 \times$ speedup over similarly-sized transformers when using a batch size of 512 and a decoding length of 4096, demonstrating its effectiveness in large-batch generation settings. + +The maximum length of generated sequences is also an important factor in RL training, as longer sequences allow the model to use more compute during learning by generating longer chains-of-thought, shown in Figure 5. To benchmark our model in this setting, we fix the batch size to 128, and vary the generation length. We compare against the same two models as in the batch size varying case, and the results are shown in Figure 2. As the generated sequence length increases, M1 achieves increasing speedups relative to the baseline models, and consistently generates at least $2x$ faster than Llama-3.2-3B (2.64x faster for the longest sequence length). + +![](images/25786caaf4660f3a50a4304b56f28e47baa7bc387de66e47c6b53666bd7861e6.jpg) +Figure 1: Inference latency when using prompt length 256 and decoding length 4096. + +![](images/44b9013e4fbeaff016b0fc6ef52a6593f403eb5688ad1a5169c408d03809e809.jpg) +Figure 2: Inference latency when using batch size 128. + +It is well-known that LLM inference comprises a prefilling (compute-bound) and a decoding (memory-bound) stage. 
For math reasoning models, it is common to assume that decoding takes much longer than prefilling, since prefilling only uses a short MATH question, while decoding generates long answers. Under these settings, the process is memory-bound. Given that Mamba is highly memory-efficient and we only use a SSM state size of 16, these memory advantages translate into improved speed. + +# 4.3 Test-Time Scaling + +Given a fixed time budget, M1 can generate more sequences or longer sequences compared to a transformer model, which can hopefully boost its performance. We evaluate the effect of test-time compute scaling on model performance. We scale both the number of samples generated as well as the length of generated samples, to see if M1 benefits from additional compute along these axes. We aim to investigate whether the speed benefit from section 4.2 can translate into an accuracy gain. + +Scaling with majority vote. + +![](images/6b1ca9db7bacc1c9c40422de2135914d4e17cd7faa8f82f90eb9c99c152f264f.jpg) +Figure 3: Number of samples vs. AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Both graphs include pass@1 and majority voting accuracies for M1 and DeepSeek-R1-Distill-Qwen-1.5B. + +![](images/be3f26d0becaf61488979db217d8ff3026af96e695f0ac6ae72c58686eacbfa9.jpg) + +The left side of Figure 3 shows the effect of scaling the number of generated samples (while fixing the maximum decoding length) on AIME25 accuracy. Both the baseline model and M1 see increasing accuracy as the number of samples increases, with M1 nearly matching the baseline performance for larger sample sizes. The efficient generation of M1 also means that generating large number of samples at test-time is faster than for the baseline transformer model. + +We quantify this efficiency in the right side of Figure 3, which compares the number of seconds spent generating samples against the resulting accuracy. 
To compute the time values on the x-axis, we find an optimal throughput value (in tokens per second) for each model by increasing batch sizes until throughput decreases. The optimal values were 7263 T/s for DeepSeek-R1-Distill-Qwen-1.5B, and 15169 T/s for M1. We then assume that each generated sample is maximum length (8K), and compute the seconds required for one sample from one model as 8K divided by the throughput. We then convert the left graph of Figure 3 into the right graph, by multiplying the number of samples for each datapoint by the seconds required per sample for each model. As an example, M1 requires roughly a half second (8K/15K) per sample, so the accuracy value for M1 at 32 samples on the left graph appears at approximately 16 seconds on the right graph. + +# Scaling with longer sequences + +Figure 4 shows the effect of scaling the maximum length of the generated answer, while fixing the number of generated samples to one. For both the baseline and M1, increasing the maximum sequence length leads to increased accuracy, as shown in the left graph in Figure 4. After converting from generation length to the seconds required to generate (done in the same way as Figure 3, but dividing the generation length by throughput), we can see the accuracy gain per time spent generating on the right side of Figure 4. In this case, M1 actually gets a higher accuracy for the same amount of time spent generating at 4 of the 5 evaluated sequence lengths, showing the benefits of efficient generation for test-time compute scaling. + +# 5 Analysis + +# Increasing Training Length in RL boosts model performance + +With more efficient models, we can increase the length of sequences used in RL training, resulting in improved performance. Empirically, we see this in Figure 5, which shows an increase in accuracy on AIME25 as we scale up the length of sequences generated when training with GRPO. 
Training with sequences of maximum length 4096 results in accuracy below $10\%$ , while allowing sequences up to length 24K boosts the accuracy up to $23\%$ . + +MATH Accuracy at each training stage + +![](images/b1e7d6ecf95fe62618e1249eae94f9acf09b3a8ec63f96d97012fbd3e875b444.jpg) +Figure 4: Generation length vs. AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Sampling for both models is done using a temperature of 0.8. + +![](images/8533bff62a3c4c790920a9ba838a5697f180ffd68c747d3f177580145292f4be.jpg) + +![](images/c2857ba74d1d22cbe528808f49d1edb1ad18d58f21f1c628b98c03710f9be0f0.jpg) + +Figure 5: Pass@1 vs. maximum sequence length in GRPO training + +
| Training stage | MATH500 | AIME24 |
| --- | --- | --- |
| Distill | 38 | 0 |
| Distill + SFT(MATH) | 45 | 0 |
| Distill + SFT(MATH) + SFT(Reason) | 74 | 22 |
| Distill + SFT(MATH) + SFT(Reason) + RL | 82 | 28 |
+ +Table 3: M1 Accuracy after each training stage on MATH500 and AIME24. + +To identify which components of our training pipeline have the greatest impact on performance, we also evaluate intermediate versions of the model on MATH500 (Hendrycks et al., 2021) and AIME24 (MAA, 2024). The results of these evaluations are presented in Table 3. Each step of the training pipeline provides a boost to performance, with particularly large gains from fine-tuning on solutions from reasoning models ( $+29\%$ on MATH500 and $+17\%$ on AIME24). + +Direct Distillation from Reasoning Models We also attempted to distill from Deepseek-R1-Qwen-1.5B instead of Llama-3.2-3B. In this case, we did not SFT on OpenMathInstruct, and instead only SFT on the 10B reasoning data that we collected after distillation. We found that the distilled model's performance was poor (38% and 3.3% pass@1 accuracy on MATH500 and AIME24, respectively). Our hypothesis for why this occurs is that 10B tokens is insufficient to effectively transfer reasoning skills from the transformer to Mamba. Although curating a high-quality reasoning dataset demands significant time and effort, we begin by leveraging the standard MATH distillation dataset from OpenMathInstruct (Toshniwal et al., 2024) to first distill a strong MATH model. We then transform this MATH model into a reasoning model via SFT on the dedicated reasoning dataset. This approach achieves strong performance with a much smaller number of reasoning tokens. + +# 6 Conclusion + +In this paper, we introduced M1, a hybrid reasoning model built on the Mamba architecture, designed to address the scalability challenges of the Transformer models. We demonstrated effective techniques for distillation and finetuning to develop M1, which achieves mathematical reasoning performance comparable to state-of-the-art reasoning models of similar size. 
Notably, M1 delivers over 3x faster inference than similar-sized Transformer models, even when using the heavily optimized vLLM inference engine, particularly at large batch sizes. This improved efficiency can make the resource-intensive inference-time strategies, such as self-consistency, more practical. Our findings establish M1 as a strong alternative to Transformer-based architectures, paving the way for more efficient and high-performing reasoning models. + +# References + +Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697. +Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebron, and Sumit Sanghai. Gqa: Training generalized multi-query transformer models from multi-head checkpoints. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 4895-4901, 2023. +Maximilian Beck, Korbinian Poppel, Markus Spanring, Andreas Auer, Oleksandra Prudnikova, Michael Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. xlstm: Extended long short-term memory, 2024. URL https://arxiv.org/abs/2405.04517. +Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute. +Aviv Bick, Kevin Y. Li, Eric P. Xing, J. Zico Kolter, and Albert Gu. Transformers to ssms: Distilling quadratic knowledge to subquadratic models, 2024. URL https://arxiv.org/abs/2408.10189. +Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787. 
+Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, and et. al. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374. +Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567, 2025. +Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168. +Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025. +Tri Dao and Albert Gu. Transformers are ssms: Generalized models and efficient algorithms through structured state space duality, 2024. URL https://arxiv.org/abs/2405.21060. + +DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, and et. al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948. +Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307. 
+Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens, 2024. URL https://arxiv.org/abs/2310.02226. +Aaron Grattaftiori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, and et. al. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783. +Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces, 2024. URL https://arxiv.org/abs/2312.00752. +Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces, 2022. URL https://arxiv.org/abs/2111.00396. +Yuxian Gu, Li Dong, Furu Wei, and Minlie Huang. Minillm: Knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2306.08543. +Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. +Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874. +Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015. URL https://arxiv.org/abs/1503.02531. 
+Jungo Kasai, Hao Peng, Yizhe Zhang, Dani Yogatama, Gabriel Ilharco, Nikolaos Pappas, Yi Mao, Weizhu Chen, and Noah A. Smith. Finetuning pretrained transformers into mns, 2021. URL https://arxiv.org/abs/2103.13076. +Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention, 2020. URL https:// arxiv.org/abs/2006.16236. +Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023. +Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22. +Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks, 2025. URL https://arxiv.org/abs/2501.10069. + +Opher Lieber, Barak Lenz, Hofit Bata, Gal Cohen, Jhonathan Osin, Itay Dalmedigos, Erez Safahi, Shaked Meirom, Yonatan Belinkov, Shai Shalev-Shwartz, Omri Abend, Raz Alon, Tomer Asida, Amir Bergman, Roman Glozman, Michael Gokhman, Avashalom Manevich, Nir Ratner, Noam Rozen, Erez Shwartz, Mor Zusman, and Yoav Shoham. Jamba: A hybrid transformer-mamba language model, 2024. URL https://arxiv.org/abs/2403.19887. +Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050. +Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision, 2024. 
URL https://arxiv.org/abs/2406.06592. +Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaler: Surpassing o1-preview with a 1.5b model by scaling rl. https://pretty-radio-b75.notion.site/ DeepScaleR-Surpassing-01-Preview-with-a-1-5B-Model-by-Scaling-RL-19681902c1468005bed8ca303013a4e2, 2025. Notion Blog. +MAA. American invitational mathematics examination 2023, 2023. URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt. +MAA. American invitationalal mathematics examination 2024, 2024. URL https://artofproblemsolving.com/wiki/index.php/American_Invitationalal_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnj0ZqM4qIqY3yGB3sBaqOaxwf_Xt. +MAA. American invitational mathematics examination 2025, 2025. URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmB0oqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt. +Jean Mercat, Igor Vasiljevic, Sedrick Keh, Kushal Arora, Achal Dave, Adrien Gaidon, and Thomas Kollar. Linearizing large language models, 2024. URL https://arxiv.org/abs/2405.06640. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candes, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393. +Daniele Paliotta, Junxiong Wang, Matteo Pagliardini, Kevin Y Li, Aviv Bick, J Zico Kolter, Albert Gu, François Fleuret, and Tri Dao. Thinking slow, fast: Scaling inference compute with distilled reasoners. arXiv preprint arXiv:2502.20339, 2025. 
+Bo Peng, Eric Alcaide, Quentin Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Chung, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Jiaju Lin, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, and et. al. Rwkv: Reinventing rnns for the transformer era, 2023. URL https://arxiv.org/abs/2305.13048. +Jacob Pfau, William Merrill, and Samuel R. Bowman. Let's think dot by dot: Hidden computation in transformer language models, 2024. URL https://arxiv.org/abs/2404.15758. +Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195. +Qwen,:, An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, and et. al. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115. + +Tokiniaina Raharison Ralambomihanta, Shahrad Mohammadzadeh, Mohammad Sami Nur Islam, Wassim Jabbour, and Laurence Liang. Scavenging hyena: Distilling transformers into long convolution models, 2024. URL https://arxiv.org/abs/2401.17574. +Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. Samba: Simple hybrid state space models for efficient unlimited context language modeling, 2024. URL https://arxiv.org/abs/2406.07522. +Matthew Renze and Erhan Guven. The effect of sampling temperature on problem solving in large language models, 2024. URL https://arxiv.org/abs/2402.05201. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 
+Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https:// arxiv.org/abs/2408.03314. +Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data, 2024. URL https://arxiv.org/abs/2410.01560. +Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback, 2022. URL https://arxiv.org/abs/2211.14275. +Junxiong Wang, Daniele Paliotta, Avner May, Alexander Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. Advances in Neural Information Processing Systems, 37:62432-62457, 2024a. +Junxiong Wang, Daniele Paliotta, Avner May, Alexander M. Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. arXiv preprint arXiv:2408.15237, 2024b. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171. +Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models, 2022. URL https://arxiv.org/abs/2206.07682. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. 
Chain-of-thought prompting elicits reasoning in large language models, 2023. URL https://arxiv.org/abs/2201.11903. +Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models, 2024. URL https://arxiv.org/abs/2408.00724. +Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686. + +Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116. +Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. Gated linear attention transformers with hardware-efficient training, 2024. URL https://arxiv.org/abs/2312.06635. +Wang Yang, Hongye Jin, Jingfeng Yang, Vipin Chaudhary, and Xiaotian Han. Thinking preference optimization, 2025. URL https://arxiv.org/abs/2502.13173. +Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models, 2023. URL https://arxiv.org/abs/2305.10601. +Jingyang Yuan, Huazuo Gao, Damai Dai, Junyu Luo, Liang Zhao, Zhengyan Zhang, Zhenda Xie, YX Wei, Lean Wang, Zhiping Xiao, et al. Native sparse attention: Hardware-aligned and natively trainable sparse attention. arXiv preprint arXiv:2502.11089, 2025. +Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. 
https://hkust-nlp.notion.site/simplerl-reason, 2025. Notion Blog. +Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Restmcts*: Llm self-training via process reward guided tree search, 2024a. URL https://arxiv.org/abs/2406.03816. +Michael Zhang, Simran Arora, Rahul Chalamala, Benjamin Frederick Spector, Alan Wu, Krithik Ramesh, Aaryan Singhal, and Christopher Re. Lolcats: On low-rank linearizing of large language models. In The Thirteenth International Conference on Learning Representations. +Michael Zhang, Kush Bhatia, Hermann Kumbong, and Christopher Ré. The hedgehog & the porcupine: Expressive linear attentions with softmax mimicry, 2024b. URL https://arxiv.org/abs/2402.04347. +Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B. Tenenbaum, and Chuang Gan. Planning with large language models for code generation, 2023. URL https://arxiv.org/abs/2303.05510. +Xuan Zhang, Fengzhuo Zhang, Cunxiao Du, Chao Du, Tianyu Pang, Wei Gao, and Min Lin. Lighttransfer: Your long-context llm is secretly a hybrid model with effortless adaptation. arXiv preprint arXiv:2410.13846, 2024c. + +# A Limitations and Future Work + +Speedup. Our current hybrid model is only $3 \times$ faster than a Transformer of the same size when serving inference with vLLM. Recently, NVIDIA introduced a new hybrid Mamba kernel7, which could further boost the speed of hybrid models. Additionally, our attention implementation in hybrid models does not yet leverage the optimizations available in vLLM. Integrating M1 into vLLM could further boost performance by taking advantage of these attention speedups. + +Why do we not distill Qwen2.5 1.5B MATH model. We considered using the Qwen2.5 1.5B MATH Instruct model as the distillation target in the first stage. However, we found that the cross entropy loss of the Qwen 1.5B MATH model on the OpenMATH Instruct dataset (Toshniwal et al., 2024) exceeded 1.8, which is much higher than that of the Llama models (0.5). 
This suggests that, to mimic the Qwen2.5 model, we need a dataset generated from a large Qwen2.5 series model rather than this one generated from the Llama models. Dataset curation from Qwen Math models goes beyond the scope of this work. + +Improvement on RL training speed. Recently, DeepSeek R1 (DeepSeek-AI et al., 2025) showed that reinforcement learning (RL) is a key component in improving model reasoning performance during post-training. Since then, recent research has predominantly relied on reinforcement learning (RL) as a training paradigm for reasoning models. However, training with RL requires the efficient generation of long sequences. For example, in VeRL (Sheng et al., 2024), the typical training batch size ranges from a few thousand to several thousand. DeepscaleR (Luo et al., 2025) also shows a significant accuracy boost when training RL with longer sequences, as it tends to enhance model performance by providing more steps for thorough reasoning. However, this shift towards reinforcement learning has resulted in the generation process becoming a significant bottleneck in reasoning model training, taking more than three times as long as the actor's weight update (forward + backward) according to the time profiling done for DeepscaleR (Luo et al., 2025). This need for efficient generation in RL presents a significant challenge for transformer models, namely due to the heavy computational burden imposed by large key-value caches during generation, especially for large batch sizes. Given their generation speed advantages, linear RNN models may be better suited for scaling RL training. 
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10449/images/0ef1f9ee797eae585e8e0f9c0e13c326b96d0b7932710e6595d305162836c6bd.jpg b/data/2025/2504_10xxx/2504.10449/images/0ef1f9ee797eae585e8e0f9c0e13c326b96d0b7932710e6595d305162836c6bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6691193b8ee0079d015373a53becd912aa9df61 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/0ef1f9ee797eae585e8e0f9c0e13c326b96d0b7932710e6595d305162836c6bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:696750af7411d758911556f038da7a931c09c8617f43435e1ea5a0b4c613096c +size 32737 diff --git a/data/2025/2504_10xxx/2504.10449/images/25786caaf4660f3a50a4304b56f28e47baa7bc387de66e47c6b53666bd7861e6.jpg b/data/2025/2504_10xxx/2504.10449/images/25786caaf4660f3a50a4304b56f28e47baa7bc387de66e47c6b53666bd7861e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14f971f3c3b9d41b234eee0043f780eccd908242 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/25786caaf4660f3a50a4304b56f28e47baa7bc387de66e47c6b53666bd7861e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b40bf9d55bf15222c21b4e7096101660c5c760a674be100f58248a9fb97c7ac2 +size 24518 diff --git a/data/2025/2504_10xxx/2504.10449/images/44b9013e4fbeaff016b0fc6ef52a6593f403eb5688ad1a5169c408d03809e809.jpg b/data/2025/2504_10xxx/2504.10449/images/44b9013e4fbeaff016b0fc6ef52a6593f403eb5688ad1a5169c408d03809e809.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6267e42cd9a5c5961152a1634b0e88f4d32e258 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/44b9013e4fbeaff016b0fc6ef52a6593f403eb5688ad1a5169c408d03809e809.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99587aafdb3149c6d008add21d2572bbca3c2dcf0b82de74d01e65527e039646 +size 23626 diff --git 
a/data/2025/2504_10xxx/2504.10449/images/6b1ca9db7bacc1c9c40422de2135914d4e17cd7faa8f82f90eb9c99c152f264f.jpg b/data/2025/2504_10xxx/2504.10449/images/6b1ca9db7bacc1c9c40422de2135914d4e17cd7faa8f82f90eb9c99c152f264f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5d618f4f5fe423d3263d312bff6be53668d7ede --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/6b1ca9db7bacc1c9c40422de2135914d4e17cd7faa8f82f90eb9c99c152f264f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76575e578fb3489002b1a0a131a3fa34bdd694620a40d5130b6fc3ddfe0ca9e3 +size 24560 diff --git a/data/2025/2504_10xxx/2504.10449/images/8533bff62a3c4c790920a9ba838a5697f180ffd68c747d3f177580145292f4be.jpg b/data/2025/2504_10xxx/2504.10449/images/8533bff62a3c4c790920a9ba838a5697f180ffd68c747d3f177580145292f4be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5023d5ae7cb8657a508d768fe6cdc2e8f32702f3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/8533bff62a3c4c790920a9ba838a5697f180ffd68c747d3f177580145292f4be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1339f0f1e1a8585fab8872c836214958f5bf31122c6a5559de0ee5e442244bcb +size 19609 diff --git a/data/2025/2504_10xxx/2504.10449/images/af7b5b0224891cd05c71fc00b371b74a039f51d72feb6613abeb470751bc0fd4.jpg b/data/2025/2504_10xxx/2504.10449/images/af7b5b0224891cd05c71fc00b371b74a039f51d72feb6613abeb470751bc0fd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50ce5bae46276ec738424af1e7dad120008541ba --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/af7b5b0224891cd05c71fc00b371b74a039f51d72feb6613abeb470751bc0fd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8eb37cee7a82ca48088bf1b9e5bd7d72c032521b1d108d2ffa41d5c60cb77fa +size 47670 diff --git a/data/2025/2504_10xxx/2504.10449/images/b1e7d6ecf95fe62618e1249eae94f9acf09b3a8ec63f96d97012fbd3e875b444.jpg 
b/data/2025/2504_10xxx/2504.10449/images/b1e7d6ecf95fe62618e1249eae94f9acf09b3a8ec63f96d97012fbd3e875b444.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9aa3fb3d6a7516f2c02c92f3b3414302e59131ff --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/b1e7d6ecf95fe62618e1249eae94f9acf09b3a8ec63f96d97012fbd3e875b444.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f79bf3d3c23fe597f1a22b5acd323fe03bc73d97d3cc77050cbb6eaa5d51b05 +size 19720 diff --git a/data/2025/2504_10xxx/2504.10449/images/be3f26d0becaf61488979db217d8ff3026af96e695f0ac6ae72c58686eacbfa9.jpg b/data/2025/2504_10xxx/2504.10449/images/be3f26d0becaf61488979db217d8ff3026af96e695f0ac6ae72c58686eacbfa9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e76f43dabf434a53081caaa7c03dd65a425ea52d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/be3f26d0becaf61488979db217d8ff3026af96e695f0ac6ae72c58686eacbfa9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7eeed535d1da4af17cb81dcc5daaaae277ba6ee3f10372ddb41941983b0de1e +size 24806 diff --git a/data/2025/2504_10xxx/2504.10449/images/c2857ba74d1d22cbe528808f49d1edb1ad18d58f21f1c628b98c03710f9be0f0.jpg b/data/2025/2504_10xxx/2504.10449/images/c2857ba74d1d22cbe528808f49d1edb1ad18d58f21f1c628b98c03710f9be0f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4bbd641005a6128bdacdf78ba35c012c5347b96 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/c2857ba74d1d22cbe528808f49d1edb1ad18d58f21f1c628b98c03710f9be0f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1934aaf4ac679221971e28970b8c4fd9110f4aa06de1d2f7f485b72143f58330 +size 17169 diff --git a/data/2025/2504_10xxx/2504.10449/images/cb5fd9277a4e3c4d7b0986a083c78583005c02d65fca2b3bb0a94b47acfa42a6.jpg b/data/2025/2504_10xxx/2504.10449/images/cb5fd9277a4e3c4d7b0986a083c78583005c02d65fca2b3bb0a94b47acfa42a6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..bb52792e276a645ce9c87d297138dac39b5df352 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/cb5fd9277a4e3c4d7b0986a083c78583005c02d65fca2b3bb0a94b47acfa42a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfd689e5da06992f69b164ba052c2311ad71e63012be59277cc644d1d8a552f6 +size 9313 diff --git a/data/2025/2504_10xxx/2504.10449/images/cdeaa06050d1d959a8f1336f74cf629fcd2a423bd195dfe19bbd53c7c8c1e117.jpg b/data/2025/2504_10xxx/2504.10449/images/cdeaa06050d1d959a8f1336f74cf629fcd2a423bd195dfe19bbd53c7c8c1e117.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2862b7f2d4d750e76f3c5b01b8bb3295acb5815f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/cdeaa06050d1d959a8f1336f74cf629fcd2a423bd195dfe19bbd53c7c8c1e117.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d372c59aacd1474c21327310e5626955fa07b3a9a23912f1c59a6cb3c58c13 +size 9277 diff --git a/data/2025/2504_10xxx/2504.10449/images/e798954b5932a8f37901dd63dee3afc69153018f0f305c98d5378ffc4d993d86.jpg b/data/2025/2504_10xxx/2504.10449/images/e798954b5932a8f37901dd63dee3afc69153018f0f305c98d5378ffc4d993d86.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11f4fff09aa52f4a0b7c30faa3de4e0073072f3c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/images/e798954b5932a8f37901dd63dee3afc69153018f0f305c98d5378ffc4d993d86.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58f369672ed262838ebb2efe02301ae47905afc52644427252bf1490b34863f1 +size 25488 diff --git a/data/2025/2504_10xxx/2504.10449/layout.json b/data/2025/2504_10xxx/2504.10449/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..5ba1b21be1b444905c21fbb4c05be78fae4912d8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10449/layout.json @@ -0,0 +1,7921 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 419, + 111 + ], + "type": "title", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 419, + 111 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 419, + 111 + ], + "type": "text", + "content": "M1: Towards Scalable Test-Time Compute with Mamba Reasoning Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "spans": [ + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "type": "text", + "content": "Junxiong Wang" + }, + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "type": "text", + "content": ", Wen-Ding Li" + }, + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "type": "text", + "content": ", Daniele Paliotta" + }, + { + "bbox": [ + 194, + 136, + 418, + 150 + ], + "type": "inline_equation", + "content": "^{3*}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 202, + 153, + 408, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 153, + 408, + 167 + ], + "spans": [ + { + "bbox": [ + 202, + 153, + 408, + 167 + ], + "type": "text", + "content": "Daniel Ritter2, Alexander M. 
Rush2, Tri Dao1,4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "spans": [ + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "text", + "content": "TogetherAI, " + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "text", + "content": "Cornell University, " + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "text", + "content": "University of Geneva, " + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 129, + 175, + 480, + 189 + ], + "type": "text", + "content": "Princeton University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 281, + 220, + 331, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 220, + 331, + 234 + ], + "spans": [ + { + "bbox": [ + 281, + 220, + 331, + 234 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 249, + 471, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 249, + 471, + 481 + ], + "spans": [ + { + "bbox": [ + 140, + 249, + 471, + 481 + ], + "type": "text", + "content": "Effective reasoning is crucial to solving complex mathematical problems. Recent large language models (LLMs) have boosted performance by scaling test-time computation through long chain-of-thought reasoning. However, transformer-based models are inherently limited in extending context length due to their quadratic computational complexity and linear memory requirements. 
In this paper, we introduce a novel hybrid linear RNN reasoning model, M1, built on the Mamba architecture, which allows memory-efficient inference. Our approach leverages a distillation process from existing reasoning models and is further enhanced through RL training. Experimental results on the AIME and MATH benchmarks show that M1 not only outperforms previous linear RNN models but also matches the performance of state-of-the-art Deepseek R1 distilled reasoning models at a similar scale. We also compare our generation speed with a highly performant general purpose inference engine, vLLM, and observe more than a 3x speedup compared to a same size transformer. With throughput speedup, we are able to achieve higher accuracy compared to DeepSeek R1 distilled transformer reasoning models under a fixed generation time budget using self-consistency voting. Overall, we introduce a hybrid Mamba reasoning model and provide a more effective approach to scaling test-time generation using self-consistency or long chain of thought reasoning. Code and pre-trained checkpoints are open-sourced at github.com/jxiw/M1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 509, + 196, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 509, + 196, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 196, + 521 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 536, + 506, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 638 + ], + "type": "text", + "content": "Robust and effective reasoning is the cornerstone for successfully performing tasks in domains such as mathematics and programming. Additionally, performance on reasoning tasks can often be boosted by generating longer sequences and/or generating many sequences in parallel (Snell et al., 2024). 
However, current transformer-based large language models (LLMs) face significant challenges when tasked with processing long sequences with large batch sizes. These models are constrained by a quadratic increase in computational complexity as the sequence length grows, coupled with a linear escalation in memory requirements. This combination makes it increasingly difficult for models to scale efficiently when handling large inputs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 641, + 507, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 507, + 708 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 507, + 708 + ], + "type": "text", + "content": "Although linear hybrid RNN models (Gu & Dao, 2024; Dao & Gu, 2024; Beck et al., 2024; Yang et al., 2024; Peng et al., 2023) have shown great potential as an alternative to transformer-based on general language models, their effectiveness on reasoning tasks remains unclear. Since modern reasoning models typically generate long chains of thought for challenging math questions, it is uncertain whether the performance of hybrid linear RNNs diminishes in such scenarios." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "type": "text", + "content": "arXiv:2504.10449v3 [cs.LG] 9 Sep 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 286, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 286, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 286, + 732 + ], + "type": "text", + "content": "*Work done when interned at TogetherAI" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 160 + ], + "type": "text", + "content": "In this paper, we propose M1 and show that it is possible to derive strong hybrid reasoning models by efficiently transferring reasoning capabilities from a large transformer model. Our training process involves distilling knowledge, incorporating math and reasoning abilities through supervised fine-tuning (SFT), and finally, boosting performance using reinforcement learning (RL) training. In total, the training process requires fewer than 50 billion tokens. In contrast, DeepSeek-R1-Distill-Qwen-1.5B is finetuned from Qwen2.5 MATH 1.5B which is trained using over 1 trillion MATH tokens on top of Qwen2.5." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 165, + 504, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 504, + 232 + ], + "type": "text", + "content": "We demonstrate that our hybrid models achieve a 3x speedup compared to transformers of the same size when served using a highly performant general purpose inference engine, vLLM, at large batch sizes. This gain is mainly due to large batches and long sequences, decoding being generally memory-bound. Lower memory usage of hybrid models can transform this advantage into a speed gain. The decoding speedup is approximately linear with the volume of model's memory access (Yuan et al., 2025)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 236, + 504, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 504, + 293 + ], + "type": "text", + "content": "Notably, this speedup can be converted to a gain in reasoning accuracy. Studies (Snell et al., 2024; Li, 2025; Chen et al., 2025) show that techniques such as self-consistency (Wang et al., 2023) and verification (Cobbe et al., 2021) at test time can significantly boost model reasoning performance. Under these conditions, a high-throughput model can further enhance its performance by generating more samples." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 297, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 504, + 365 + ], + "type": "text", + "content": "The paper is organized as follows. Section 2 covers related work, Section 3 introduces our pipeline for distilling a hybrid reasoning model, and Section 4.1 presents our results evaluating M1 on math benchmarks. 
Sections 4.2 and 4.3 evaluate the performance gains of M1 in terms of both inference speed and scaling test-time compute. Section 5 provides some additional analysis of the impact of different generation lengths when training on RL, and of the impact of the different steps of the distillation pipeline we propose on performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 369, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 425 + ], + "type": "text", + "content": "Overall, we show that M1 performs on par with DeepSeek-R1-Distill-Qwen-1.5B, achieving scores of 82 on MATH500 (Hendrycks et al., 2021), 23 on AIME25 (MAA, 2025), 28 on AIME24 (MAA, 2024), and 47 on OlympiadBench (He et al., 2024), while offering " + }, + { + "bbox": [ + 104, + 369, + 504, + 425 + ], + "type": "inline_equation", + "content": "3 \\times" + }, + { + "bbox": [ + 104, + 369, + 504, + 425 + ], + "type": "text", + "content": " faster inference throughput, even compared to the highly optimized vLLM (Kwon et al., 2023) implementation for Transformer models." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 441, + 201, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 201, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 201, + 453 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 467, + 215, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 467, + 215, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 215, + 479 + ], + "type": "text", + "content": "2.1 Reasoning models" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 487, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 506, + 620 + ], + "type": "text", + "content": "Recent models like Deepseek-R1 (DeepSeek-AI et al., 2025) have shown the potential of RL training to improve performance on verifiable reasoning tasks, such as math problem solving and programming. Additional work has proposed methods for inducing this reasoning behavior via supervised fine-tuning, either on curated data (Muennighoff et al., 2025) or on generated pairs of traces (Yang et al., 2025). Other approaches also combine search procedures such as MCTS with language models (Qi et al., 2024) or alter standard RL training schemes to control the length of generated outputs (Aggarwal & Welleck, 2025). After training, these models solve complex tasks by generating long chains of thought, which often include subtasks of the overall problem, multiple attempted solutions, and backtracking over prior attempts (Gandhi et al., 2025). Since the performance of these models, both during training and inference, relies on generating lengthy chains of thought, more efficient architectures can enable larger scale training and less costly generation." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 634, + 369, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 369, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 369, + 646 + ], + "type": "text", + "content": "2.2 Enhancing Reasoning via Scaled Inference Compute" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "Increasing the computational budget during inference has become a promising approach to boost LLM performance. Methods like Chain of Thought (CoT) and its derivatives have achieved notable gains on reasoning benchmarks by breaking down complex tasks into intermediate steps (Wei et al., 2023; Yao et al., 2023). Although decomposing tasks improves reasoning, it also lengthens generation sequences and raises computational costs. Some recent studies even indicate that this extra computation might itself enhance model capabilities (Pfau et al., 2024). In addition, adaptive compute allocation during inference" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "type": "text", + "content": "has been explored. For example, Goyal et al. 
(2024) incorporated pause tokens into the vocabulary, allowing models to distribute compute more efficiently and improve both reasoning and overall task performance. LightTransfer (Zhang et al., 2024c) introduces a lightweight method that detects lazy layers and replaces their full attention with streaming attention—slashing KV-cache overhead and boosting throughput." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 143, + 507, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 507, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 507, + 222 + ], + "type": "text", + "content": "Another strategy involves generating several outputs and selecting the best one. Researchers have developed various sampling algorithms to diversify and enhance the quality of generated responses, thereby increasing the chances of retrieving the most accurate answer (Wang et al., 2023; Renze & Guven, 2024; Zhang et al., 2023). Moreover, outcome and process reward models (ORMs and PRMs) have been introduced to evaluate responses and steer intermediate generation steps (Lightman et al., 2023; Zhang et al., 2024a; Luo et al., 2024; Uesato et al., 2022)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 225, + 506, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 225, + 506, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 225, + 506, + 294 + ], + "type": "text", + "content": "Recent investigations reveal that, under fixed compute budgets, smaller LLMs augmented with inference-time compute techniques (such as majority voting or PRM-guided search) can outperform larger models (Snell et al., 2024; Wu et al., 2024; Beeching et al., 2024). However, these results are mainly confined to Transformer-based architectures, leaving open questions about whether similar scaling laws hold for subquadratic architectures, which offer faster inference but might compromise on expressiveness." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 305, + 320, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 305, + 320, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 320, + 316 + ], + "type": "text", + "content": "2.3 Alternatives to Transformer Architectures" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 326, + 507, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 507, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 507, + 471 + ], + "type": "text", + "content": "Even though most reasoning models are based on the Transformer architecture (Grattaftori et al., 2024; Qwen et al., 2025), alternatives have been proposed to alleviate their high computational cost. Models built on top of RNNs (Beck et al., 2024; Peng et al., 2023), state space models (SSMs) (Gu et al., 2022; Gu & Dao, 2024), and linear attention mechanisms (Katharopoulos et al., 2020; Yang et al., 2024) demonstrate superior inference and memory efficiency, particularly for long-context tasks and large-batch generation. The Mamba series (Mamba-1 and Mamba-2) notably introduced selective state spaces to enable linear-time sequence modeling with strong performance (Gu & Dao, 2024; Dao & Gu, 2024). In addition, hybrid architectures that combine a few self-attention layers with subquadratic layers (e.g., Mamba) have emerged, showing advantages over both pure Transformer and pure subquadratic designs (Lieber et al., 2024; Ren et al., 2024). Such architectures are particularly suited to meet the high compute demands of inference-time scaling, and our work investigates their scaling properties." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 483, + 286, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 286, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 286, + 495 + ], + "type": "text", + "content": "2.4 Knowledge Distillation Strategies" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 503, + 507, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 507, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 507, + 669 + ], + "type": "text", + "content": "Knowledge distillation has proven to be an effective means of transferring capabilities from large teacher models to smaller, more efficient student models (Hinton et al., 2015). In LLMs, this process compresses a larger pre-trained model into a more compact version while preserving core knowledge and functionality (Gu et al., 2024; Xu et al., 2024). Although larger models tend to exhibit superior reasoning abilities due to scaling properties (Xu et al., 2025; Wei et al., 2022), distillation techniques have enabled smaller models to achieve competitive reasoning performance (DeepSeek-AI et al., 2025; Labs, 2025). While most efforts have focused on intra-architecture distillation (e.g., Transformer-to-Transformer), recent studies have ventured into cross-architecture distillation. For instance, pretrained Transformers have been distilled into architectures such as RNNs (Kasai et al., 2021; Mercat et al., 2024), linear attention models (Zhang et al., 2024b; Zhang et al.), convolutional networks (Ralambomihanta et al., 2024), and SSMs (Bick et al., 2024; Wang et al., 2024b; Paliotta et al., 2025). Whether the robust reasoning abilities of Deepseek R1 (DeepSeek-AI et al., 2025) distilled models can be effectively transferred across different architectures remains an open question." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 684, + 269, + 699 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 684, + 269, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 684, + 269, + 699 + ], + "type": "text", + "content": "3 The M1 Reasoning Model" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 709, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 507, + 734 + ], + "type": "text", + "content": "In this section, we present a multi-stage process for building our hybrid linear RNN reasoning model, M1. The approach has three stages: distillation, SFT, and RL. We begin by" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 96, + 504, + 282 + ], + "blocks": [ + { + "bbox": [ + 106, + 82, + 297, + 95 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 297, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 297, + 95 + ], + "type": "text", + "content": "Algorithm 1 Initializing MAMBAINLLAMA" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "lines": [ + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": "1: Shapes: B - Batch, L - Length, D - embed size, " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "N = D" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": 
"/Attention_heads, " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "N'" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " - expand \n2: Input: " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "o_t" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": ": (B, D) \n3: Output: output: (B, D) \n4: New Params: MLP, A \n5: for each head " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{W}^K, \\mathbf{W}^Q, \\mathbf{W}^V, \\mathbf{W}^o : (N, D)" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " after expanding to same dimension do \n6: Head Parameter: A : (N, " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "N'" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": ") \n7: for all positions t: \n8: " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "x_t : (B, N) \\leftarrow \\mathbf{W}^V o_t" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " \n9: " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_t : (B, N) \\leftarrow \\mathbf{W}^K o_t" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " \n10: " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_t : (B, N) \\leftarrow \\mathbf{W}^Q o_t" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " \n11: " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\Delta_t : (B, N') \\leftarrow \\mathrm{MLP}(x_t)" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " \n12: " + }, + { + 
"bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{A}}_{1:T}, \\overline{\\mathbf{B}}_{1:T}, \\overline{\\mathbf{C}}_{1:T} : (B, N, N') \\leftarrow \\mathrm{DISC}(\\mathbf{A}, \\mathbf{B}, \\mathbf{C}, \\Delta)" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " \n13: " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "y \\gets \\mathrm{LINEARRNN}(\\overline{\\mathbf{A}}, \\overline{\\mathbf{B}}, \\overline{\\mathbf{C}}, x)" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " \n14: output " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " output + " + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{W}^{OT} y" + }, + { + "bbox": [ + 106, + 96, + 504, + 282 + ], + "type": "text", + "content": " \n15: end for \n16: return output" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 301, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 506, + 381 + ], + "type": "text", + "content": "distilling a Transformer model into a Mamba architecture, adapting the method of Wang et al. (2024a), which initializes the hybrid model's weights from a transformer model. We then perform math-specific supervised fine-tuning (SFT) on general mathematical datasets to enhance the model's mathematical performance, first without yet incorporating datasets generated by reasoning-focused models, and then with reasoning data leveraging multiple large-scale datasets generated by the R1 model series. 
Finally, we apply R1's GRPO method to further enhance the model's math reasoning capability." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 392, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 427 + ], + "type": "text", + "content": "Stage 1: Distillation. The first step in building our M1 model is distilling a pretrained transformer model into a Mamba model. We adapt the distillation approach introduced by Wang et al. (2024a)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": "The MAMBAINLLAMA framework (Wang et al., 2024a) proposes distilling hybrid Transformer-Mamba models by reusing weights from attention layers. In this distillation procedure, outlined in Algorithm 1, linear projections for " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{Q},\\mathbf{K},\\mathbf{V}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{O}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": " are initialized from the corresponding projections for " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{C},\\mathbf{B},\\mathbf{X}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{O}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": ", respectively. 
The newly introduced parameters in the Mamba layers are the sampling rate " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": " and the dynamic parameter " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": ", which control the resulting Mamba module via a discretization function. Specifically, the sampling rate " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\Delta \\in \\mathbb{R}^{N'}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": " discretizes " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_t,\\mathbf{C}_t \\in \\mathbb{R}^{N\\times 1}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": ", yielding " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{B}}_t,\\overline{\\mathbf{C}}_t \\in \\mathbb{R}^{N'\\times N\\times 1}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": ", as detailed in Algorithm 1. Different from Wang et al. (2024a), we introduce two additional linear layers to project from head.dim " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": " kv_head to head.dim " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": " n_head. This is because GQA (Ainslie et al., 2023) is used in the transformer model to reduce the KV cache. 
As Mamba does not utilize a KV cache, this expansion can increase the expressiveness of " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 104, + 430, + 506, + 556 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "text", + "content": "We directly reuse the MLP layers; however, unlike the original approach, we replace the attention layers with Mamba layers in a single step. Subsequently, we fine-tune the entire model to expedite the training process. The distillation step involves minimizing the token-level KL divergence, aligning the entire probability distribution of the student model, " + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "inline_equation", + "content": "p(\\cdot ;\\theta)" + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "text", + "content": ", with the teacher model, " + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "inline_equation", + "content": "p(\\cdot ;\\theta_T)" + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "text", + "content": ", for every candidate token at position " + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "text", + "content": ". 
We use the reverse KL divergence, " + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{KL}}(p(\\cdot ;\\theta)\\parallel p(\\cdot ;\\theta_T))" + }, + { + "bbox": [ + 104, + 559, + 507, + 651 + ], + "type": "text", + "content": ", as our loss function rather than the forward KL divergence. We choose the reverse KL divergence due to its mode-seeking properties, which results in improved empirical performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 655, + 507, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 507, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 507, + 712 + ], + "type": "text", + "content": "We reimplement the distillation and SFT framework using the Axolotl " + }, + { + "bbox": [ + 104, + 655, + 507, + 712 + ], + "type": "inline_equation", + "content": "{}^{1}" + }, + { + "bbox": [ + 104, + 655, + 507, + 712 + ], + "type": "text", + "content": " training framework. We apply the model chat template,mask the user prompt,and compute the loss only over the tokens generated in the assistant's output. To speed up training,we use data packing to merge different sequences into a single one until we reach the maximum sequence length which is set to 8192 . 
We find that data packing achieves significantly better results compared" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 720, + 318, + 731 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 318, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 318, + 731 + ], + "type": "text", + "content": "1https://github.com/axolotl-ai-cloud/axolotl" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "to the non-packing version in distillation for the same training steps. We use the AdamW optimizer with learning rate " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " with cosine decay, " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\beta = (0.9, 0.95)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " and a weight decay of 0.1." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 131, + 504, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 131, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 104, + 131, + 504, + 187 + ], + "type": "text", + "content": "Stage 2: SFT Following the distillation procedure, we finetune the model on a large set of math problems, OpenMathInstruct-2 (Toshniwal et al., 2024). As in the distillation stage, we apply the chat template to the prompts, mask the user prompt, and compute the loss only over the tokens generated in the assistant's output. We train for two epochs using the same optimizer as distillation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "text", + "content": "After the initial fine-tuning stage, we finetune on an additional set of math problems and solutions generated by reasoning models. We collect a mixed reasoning dataset, including OpenR1-Math-220k" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "text", + "content": ", OpenThoughts-114k-math" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "text", + "content": ", and ServiceNow-AI-R1-Distill" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "text", + "content": ", Magpie-Reasoning-250K" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 104, + 191, + 506, + 285 + ], + "type": "text", + "content": " for a total of 10B reasoning tokens. 
The first two datasets were generated from R1, while the last two were generated from the R1 distilled Qwen 32B model and R1 distilled Llama 70B model.
The resulting formula is," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 189, + 371, + 505, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 371, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 189, + 371, + 505, + 399 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}}} \\left[ \\frac {\\pi_ {\\theta} (a | s)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (a | s)} \\hat {A} (s, a) \\right] + \\eta H (\\pi_ {\\theta}) \\tag {1}", + "image_path": "cb5fd9277a4e3c4d7b0986a083c78583005c02d65fca2b3bb0a94b47acfa42a6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\hat{A}(s, a)" + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "text", + "content": " is the estimate of the advantage from multiple rollouts. We use a batch size of 128 and a PPO batch size of 64, which also determines the number of PPO iterations, " + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\mu = 2" + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "text", + "content": ". We set the number of generations for each sequence to 8 and the maximum generation length to 32k. For optimization, we use the Adam optimizer with a learning rate of " + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "text", + "content": ". We train for 50 steps, and pick the best checkpoint with the highest critic reward. 
We append the simple prompt \"Let's think step by step and output the final answer within " + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\backslash" + }, + { + "bbox": [ + 104, + 410, + 504, + 491 + ], + "type": "text", + "content": "boxed{}\") to the end of each question in both training and evaluation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 506, + 195, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 195, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 195, + 521 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 533, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 504, + 578 + ], + "type": "text", + "content": "Model. We adopt the Llama3.2-3B-Instruct models as distillation target models. For Mamba layers, we set the SSM state size to 16. Consequently, the number of SSM groups after expansion is " + }, + { + "bbox": [ + 104, + 533, + 504, + 578 + ], + "type": "inline_equation", + "content": "3072 / 16 = 192" + }, + { + "bbox": [ + 104, + 533, + 504, + 578 + ], + "type": "text", + "content": " for the 3B model. We use 6 interleaved attention layers among 28 total layers." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 591, + 506, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 506, + 637 + ], + "type": "text", + "content": "Evaluation Dataset. Following common practice in evaluating reasoning models, we use a similar set of math benchmarks, including competition-level problems: MATH500 (Hendrycks et al., 2021), AIME25 (MAA, 2025), AIME24 (MAA, 2024), AMC23 (MAA, 2023), and OlympiadBench (He et al., 2024)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 646, + 555, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 116, + 646, + 377, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 646, + 377, + 657 + ], + "spans": [ + { + "bbox": [ + 116, + 646, + 377, + 657 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 646, + 377, + 657 + ], + "type": "text", + "content": "https://huggingface.co/datasets/open-r1/OpenR1-Math-220k" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 118, + 658, + 407, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 658, + 407, + 668 + ], + "spans": [ + { + "bbox": [ + 118, + 658, + 407, + 668 + ], + "type": "text", + "content": "3https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 118, + 669, + 395, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 669, + 395, + 679 + ], + "spans": [ + { + "bbox": [ + 118, + 669, + 395, + 679 + ], + "type": "text", + "content": "4https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 681, + 555, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 681, + 555, + 691 + ], + "spans": [ + { + "bbox": [ + 118, + 681, + 555, + 691 + ], + "type": "text", + "content": "5https://huggingface.co/datasets/Magpie-Align/Magpie-Reasoning-V2-250K-CoT-Deepseek-R1-Llama-70B" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 692, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 692, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 692, + 504, + 732 + ], + "type": "text", + "content": "6We add it into the popular VeRL (Sheng et al., 2024) framework. 
In doing so, we addressed and resolved the CUDA graph incompatibility issues that previously arose during training with PyTorch's FSDP module. As a result, the updated framework now efficiently supports Mamba generation with CUDA graph enabled, making it " + }, + { + "bbox": [ + 106, + 692, + 504, + 732 + ], + "type": "inline_equation", + "content": "5 \\times" + }, + { + "bbox": [ + 106, + 692, + 504, + 732 + ], + "type": "text", + "content": " faster than with CUDA Graph disabled" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": "Evaluation Metrics. Our model's performance is assessed using two key metrics: coverage and accuracy. In fields such as coding and formal proofs, where answers can be automatically verified, coverage translates directly to enhanced performance and is widely utilized (Chen et al., 2021; Brown et al., 2024). Coverage is often measured using the pass@k metric, with " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " indicating the number of samples per problem (Chen et al., 2021; Brown et al., 2024). 
This metric estimates the likelihood that at least one correct solution exists among the " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " samples. To minimize variance when calculating coverage, we employ the unbiased estimation formula from Chen et al. (2021). Specifically, we generate " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "N \\geq k" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " total samples per task. The probability that a correct solution exists among a pool of " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " generated samples can then be determined given the total number of correct solutions " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "C_i" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " for each task." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 191, + 206, + 416, + 240 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 191, + 206, + 416, + 240 + ], + "spans": [ + { + "bbox": [ + 191, + 206, + 416, + 240 + ], + "type": "interline_equation", + "content": "\\text {p a s s} @ \\mathrm {k} = \\frac {1}{\\# \\text {o f p r o b l e m s}} \\sum_ {i = 1} ^ {\\# \\text {o f p r o b l e m s}} \\left(1 - \\frac {\\binom {N - C _ {i}} {k}}{\\binom {N} {k}}\\right)", + "image_path": "cdeaa06050d1d959a8f1336f74cf629fcd2a423bd195dfe19bbd53c7c8c1e117.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": "We implement this formula using a numerically stable approach as recommended by Chen et al. (2021)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 277, + 506, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 322 + ], + "type": "text", + "content": "When using additional compute, we employ multiple aggregation strategies. The most straightforward method is majority voting, also known as self-consistency decoding (Wang et al., 2023), which takes the majority response among " + }, + { + "bbox": [ + 104, + 277, + 506, + 322 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 277, + 506, + 322 + ], + "type": "text", + "content": " samples as the predicted answer, and uses that to compute the accuracy." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 335, + 230, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 230, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 230, + 348 + ], + "type": "text", + "content": "4.1 Reasoning Evaluation" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 106, + 361, + 502, + 437 + ], + "blocks": [ + { + "bbox": [ + 106, + 361, + 502, + 437 + ], + "lines": [ + { + "bbox": [ + 106, + 361, + 502, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 361, + 502, + 437 + ], + "type": "table", + "html": "
ModelAIME25AIME24MATH500AMC23OlympiadBench
Qwen2.5-Math-7B-Instruct-13.379.850.640.7
rStar-Math-7B (Guan et al., 2025)-26.778.447.547.1
Eurus-2-7B-PRIME (Cui et al., 2025)-26.779.257.842.1
Qwen2.5-7B-SimpleRL (Zeng et al., 2025)-26.782.462.543.3
DeepSeek-R1-Qwen-1.5B23.028.882.862.943.3
M1-3B23.528.982.162.847.3
", + "image_path": "af7b5b0224891cd05c71fc00b371b74a039f51d72feb6613abeb470751bc0fd4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 106, + 485, + 503, + 529 + ], + "blocks": [ + { + "bbox": [ + 104, + 445, + 504, + 467 + ], + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 467 + ], + "type": "text", + "content": "Table 1: Evaluation results for M1-3B, DeepSeek-R1-Distill-Qwen-1.5B and other MATH models on MATH benchmarks" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 485, + 503, + 529 + ], + "lines": [ + { + "bbox": [ + 106, + 485, + 503, + 529 + ], + "spans": [ + { + "bbox": [ + 106, + 485, + 503, + 529 + ], + "type": "table", + "html": "
ModelAIME25AIME24MATH500AMC23OlympiadBench
Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32Pass@1Maj@32
DeepSeek-R1-Qwen-1.5B23.035.028.849.282.891.062.954.243.380.3
M1-3B23.534.629.050.582.191.862.855.047.380.1
", + "image_path": "0ef1f9ee797eae585e8e0f9c0e13c326b96d0b7932710e6595d305162836c6bd.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 126, + 536, + 482, + 550 + ], + "lines": [ + { + "bbox": [ + 126, + 536, + 482, + 550 + ], + "spans": [ + { + "bbox": [ + 126, + 536, + 482, + 550 + ], + "type": "text", + "content": "Table 2: Maj@32 results comparing M1-3B with DeepSeek-R1-Distill-Qwen-1.5B." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 560, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 504, + 616 + ], + "type": "text", + "content": "We evaluate our models using a temperature setting of 0.7 and a sequence length of " + }, + { + "bbox": [ + 104, + 560, + 504, + 616 + ], + "type": "inline_equation", + "content": "32\\mathrm{k}" + }, + { + "bbox": [ + 104, + 560, + 504, + 616 + ], + "type": "text", + "content": " with evaluation tools in VeRL. We use " + }, + { + "bbox": [ + 104, + 560, + 504, + 616 + ], + "type": "inline_equation", + "content": "32\\mathrm{k}" + }, + { + "bbox": [ + 104, + 560, + 504, + 616 + ], + "type": "text", + "content": " because it has become the standard for evaluating performance on reasoning models (DeepSeek-AI et al., 2025; Luo et al., 2025). We report the pass@1 metric averaged over 64 runs; for majority voting, we repeat the metric calculation 100 times." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "We report the accuracy of M1-3B and DeepSeek-R1-Distill-Qwen-1.5B in Table 1 and 2. We use the baseline DeepSeek-R1-Distill-Qwen-1.5B since a 3B R1 reasoning model is still not available. 
Although M1-3B has more parameters than DeepSeek-R1-Distill-Qwen-1.5B, its speed is still comparable even with shorter contexts, so we believe this is a fair comparison. Our model's performance is competitive with state-of-the-art open reasoning models in the same model size range and outperforms larger nonreasoning math transformer models. Our model performs slightly worse on AIME24 compared to the DeepSeek-R1-Distill-Qwen-1.5B model. Notably, DeepSeek-R1-Distill-Qwen-1.5B is built on top of the Qwen2.5 MATH models, which were finetuned with over 1T MATH tokens on top of the Qwen2.5 models, significantly more training data than what M1-3B used in total." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 211, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 211, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 211, + 94 + ], + "type": "text", + "content": "4.2 Speed Evaluation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 102, + 506, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 170 + ], + "type": "text", + "content": "We benchmark inference time with our model against a transformer model (Llama-3.2.-3B (Grattafori et al., 2024)) of the same size. We use vLLM (version 0.6.3), which is the version used in VeRL for efficient rollouts. We also compare against DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI et al., 2025), a reasoning transformer model that is half the size of M1. 
This model has the same number of layers as the 3B parameter transformer, but the hidden dimension is half the size." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 175, + 504, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 504, + 220 + ], + "type": "text", + "content": "According to Luo et al. (2025), the average generation length of reasoning models on MATH questions is " + }, + { + "bbox": [ + 104, + 175, + 504, + 220 + ], + "type": "inline_equation", + "content": "4\\mathrm{k}" + }, + { + "bbox": [ + 104, + 175, + 504, + 220 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 175, + 504, + 220 + ], + "type": "inline_equation", + "content": "5\\mathrm{k}" + }, + { + "bbox": [ + 104, + 175, + 504, + 220 + ], + "type": "text", + "content": ". We therefore fix a decoding length of 4096 (and prompt length of 256) and benchmark our model across a range of batch sizes. We vary the batch size from 8 to 512, measuring the inference latency across different models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 224, + 506, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 506, + 304 + ], + "type": "text", + "content": "We perform our benchmarking on a single NVIDIA H100 GPU with greedy decoding. To ensure that every model generates up to the set maximum number of tokens, we use ignoreEOS=True. Before recording results, we warm up the system with two runs. The final performance metrics are then averaged over three subsequent runs. The inference speeds of the models across batch sizes are shown in Figure 1. 
M1 achieves a " + }, + { + "bbox": [ + 104, + 224, + 506, + 304 + ], + "type": "inline_equation", + "content": "3 \\times" + }, + { + "bbox": [ + 104, + 224, + 506, + 304 + ], + "type": "text", + "content": " speedup over similarly-sized transformers when using a batch size of 512 and a decoding length of 4096, demonstrating its effectiveness in large-batch generation settings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 307, + 504, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 504, + 398 + ], + "type": "text", + "content": "The maximum length of generated sequences is also an important factor in RL training, as longer sequences allow the model to use more compute during learning by generating longer chains-of-thought, shown in Figure 5. To benchmark our model in this setting, we fix the batch size to 128, and vary the generation length. We compare against the same two models as in the batch size varying case, and the results are shown in Figure 2. As the generated sequence length increases, M1 achieves increasing speedups relative to the baseline models, and consistently generates at least " + }, + { + "bbox": [ + 104, + 307, + 504, + 398 + ], + "type": "inline_equation", + "content": "2x" + }, + { + "bbox": [ + 104, + 307, + 504, + 398 + ], + "type": "text", + "content": " faster than Llama-3.2-3B (2.64x faster for the longest sequence length)." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 108, + 407, + 298, + 518 + ], + "blocks": [ + { + "bbox": [ + 108, + 407, + 298, + 518 + ], + "lines": [ + { + "bbox": [ + 108, + 407, + 298, + 518 + ], + "spans": [ + { + "bbox": [ + 108, + 407, + 298, + 518 + ], + "type": "image", + "image_path": "25786caaf4660f3a50a4304b56f28e47baa7bc387de66e47c6b53666bd7861e6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 112, + 521, + 291, + 553 + ], + "lines": [ + { + "bbox": [ + 112, + 521, + 291, + 553 + ], + "spans": [ + { + "bbox": [ + 112, + 521, + 291, + 553 + ], + "type": "text", + "content": "Figure 1: Inference latency when using prompt length 256 and decoding length 4096." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 314, + 407, + 503, + 519 + ], + "blocks": [ + { + "bbox": [ + 314, + 407, + 503, + 519 + ], + "lines": [ + { + "bbox": [ + 314, + 407, + 503, + 519 + ], + "spans": [ + { + "bbox": [ + 314, + 407, + 503, + 519 + ], + "type": "image", + "image_path": "44b9013e4fbeaff016b0fc6ef52a6593f403eb5688ad1a5169c408d03809e809.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 521, + 494, + 542 + ], + "lines": [ + { + "bbox": [ + 320, + 521, + 494, + 542 + ], + "spans": [ + { + "bbox": [ + 320, + 521, + 494, + 542 + ], + "type": "text", + "content": "Figure 2: Inference latency when using batch size 128." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 564, + 504, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 504, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 504, + 633 + ], + "type": "text", + "content": "It is well-known that LLM inference comprises a prefilling (compute-bound) and a decoding (memory-bound) stage. 
For math reasoning models, it is common to assume that decoding takes much longer than prefilling, since prefilling only uses a short MATH question, while decoding generates long answers. Under these settings, the process is memory-bound. Given that Mamba is highly memory-efficient and we only use a SSM state size of 16, these memory advantages translate into improved speed." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 644, + 212, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 212, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 212, + 658 + ], + "type": "text", + "content": "4.3 Test-Time Scaling" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 665, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 734 + ], + "type": "text", + "content": "Given a fixed time budget, M1 can generate more sequences or longer sequences compared to a transformer model, which can hopefully boost its performance. We evaluate the effect of test-time compute scaling on model performance. We scale both the number of samples generated as well as the length of generated samples, to see if M1 benefits from additional compute along these axes. We aim to investigate whether the speed benefit from section 4.2 can translate into an accuracy gain." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 231, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 231, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 231, + 95 + ], + "type": "text", + "content": "Scaling with majority vote." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 126, + 119, + 297, + 227 + ], + "blocks": [ + { + "bbox": [ + 126, + 119, + 297, + 227 + ], + "lines": [ + { + "bbox": [ + 126, + 119, + 297, + 227 + ], + "spans": [ + { + "bbox": [ + 126, + 119, + 297, + 227 + ], + "type": "image", + "image_path": "6b1ca9db7bacc1c9c40422de2135914d4e17cd7faa8f82f90eb9c99c152f264f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "lines": [ + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "type": "text", + "content": "Figure 3: Number of samples vs. AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Both graphs include pass@1 and majority voting accuracies for M1 and DeepSeek-R1-Distill-Qwen-1.5B." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 305, + 119, + 477, + 225 + ], + "blocks": [ + { + "bbox": [ + 305, + 119, + 477, + 225 + ], + "lines": [ + { + "bbox": [ + 305, + 119, + 477, + 225 + ], + "spans": [ + { + "bbox": [ + 305, + 119, + 477, + 225 + ], + "type": "image", + "image_path": "be3f26d0becaf61488979db217d8ff3026af96e695f0ac6ae72c58686eacbfa9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 282, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 504, + 348 + ], + "type": "text", + "content": "The left side of Figure 3 shows the effect of scaling the number of generated samples (while fixing the maximum decoding length) on AIME25 accuracy. Both the baseline model and M1 see increasing accuracy as the number of samples increases, with M1 nearly matching the baseline performance for larger sample sizes. The efficient generation of M1 also means that generating large number of samples at test-time is faster than for the baseline transformer model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 354, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 506, + 477 + ], + "type": "text", + "content": "We quantify this efficiency in the right side of Figure 3, which compares the number of seconds spent generating samples against the resulting accuracy. To compute the time values on the x-axis, we find an optimal throughput value (in tokens per second) for each model by increasing batch sizes until throughput decreases. The optimal values were 7263 T/s for DeepSeek-R1-Distill-Qwen-1.5B, and 15169 T/s for M1. 
We then assume that each generated sample is maximum length (8K), and compute the seconds required for one sample from one model as 8K divided by the throughput. We then convert the left graph of Figure 3 into the right graph, by multiplying the number of samples for each datapoint by the seconds required per sample for each model. As an example, M1 requires roughly a half second (8K/15K) per sample, so the accuracy value for M1 at 32 samples on the left graph appears at approximately 16 seconds on the right graph." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 481, + 246, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 481, + 246, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 246, + 493 + ], + "type": "text", + "content": "Scaling with longer sequences" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 498, + 506, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 506, + 598 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 506, + 598 + ], + "type": "text", + "content": "Figure 4 shows the effect of scaling the maximum length of the generated answer, while fixing the number of generated samples to one. For both the baseline and M1, increasing the maximum sequence length leads to increased accuracy, as shown in the left graph in Figure 4. After converting from generation length to the seconds required to generate (done in the same way as Figure 3, but dividing the generation length by throughput), we can see the accuracy gain per time spent generating on the right side of Figure 4. In this case, M1 actually gets a higher accuracy for the same amount of time spent generating at 4 of the 5 evaluated sequence lengths, showing the benefits of efficient generation for test-time compute scaling." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 616, + 174, + 630 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 174, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 174, + 630 + ], + "type": "text", + "content": "5 Analysis" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 643, + 382, + 655 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 643, + 382, + 655 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 382, + 655 + ], + "type": "text", + "content": "Increasing Training Length in RL boosts model performance" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": "With more efficient models, we can increase the length of sequences used in RL training, resulting in improved performance. Empirically, we see this in Figure 5, which shows an increase in accuracy on AIME25 as we scale up the length of sequences generated when training with GRPO. Training with sequences of maximum length 4096 results in accuracy below " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": ", while allowing sequences up to length 24K boosts the accuracy up to " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 720, + 282, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 282, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 282, + 733 + ], + "type": "text", + "content": "MATH Accuracy at each training stage" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 81, + 299, + 193 + ], + "blocks": [ + { + "bbox": [ + 111, + 81, + 299, + 193 + ], + "lines": [ + { + "bbox": [ + 111, + 81, + 299, + 193 + ], + "spans": [ + { + "bbox": [ + 111, + 81, + 299, + 193 + ], + "type": "image", + "image_path": "b1e7d6ecf95fe62618e1249eae94f9acf09b3a8ec63f96d97012fbd3e875b444.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 203, + 506, + 228 + ], + "lines": [ + { + "bbox": [ + 104, + 203, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 506, + 228 + ], + "type": "text", + "content": "Figure 4: Generation length vs. AIME25 accuracy (left) and generation time (seconds) vs. AIME25 accuracy (right). Sampling for both models is done using a temperature of 0.8." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 81, + 502, + 193 + ], + "blocks": [ + { + "bbox": [ + 309, + 81, + 502, + 193 + ], + "lines": [ + { + "bbox": [ + 309, + 81, + 502, + 193 + ], + "spans": [ + { + "bbox": [ + 309, + 81, + 502, + 193 + ], + "type": "image", + "image_path": "8533bff62a3c4c790920a9ba838a5697f180ffd68c747d3f177580145292f4be.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 220, + 261, + 389, + 384 + ], + "blocks": [ + { + "bbox": [ + 220, + 261, + 389, + 384 + ], + "lines": [ + { + "bbox": [ + 220, + 261, + 389, + 384 + ], + "spans": [ + { + "bbox": [ + 220, + 261, + 389, + 384 + ], + "type": "image", + "image_path": "c2857ba74d1d22cbe528808f49d1edb1ad18d58f21f1c628b98c03710f9be0f0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 154, + 413, + 457, + 480 + ], + "blocks": [ + { + "bbox": [ + 157, + 399, + 452, + 412 + ], + "lines": [ + { + "bbox": [ + 157, + 399, + 452, + 412 + ], + "spans": [ + { + "bbox": [ + 157, + 399, + 452, + 412 + ], + "type": "text", + "content": "Figure 5: Pass@1 vs. maximum sequence length in GRPO training" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 154, + 413, + 457, + 480 + ], + "lines": [ + { + "bbox": [ + 154, + 413, + 457, + 480 + ], + "spans": [ + { + "bbox": [ + 154, + 413, + 457, + 480 + ], + "type": "table", + "html": "
MATH500AIME24
Distill380
Distill + SFT(MATH)450
Distill + SFT(MATH) + SFT(Reason)7422
Distill + SFT(MATH) + SFT(Reason) + RL8228
", + "image_path": "e798954b5932a8f37901dd63dee3afc69153018f0f305c98d5378ffc4d993d86.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 139, + 487, + 470, + 500 + ], + "lines": [ + { + "bbox": [ + 139, + 487, + 470, + 500 + ], + "spans": [ + { + "bbox": [ + 139, + 487, + 470, + 500 + ], + "type": "text", + "content": "Table 3: M1 Accuracy after each training stage on MATH500 and AIME24." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 539, + 506, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 506, + 606 + ], + "type": "text", + "content": "To identify which components of our training pipeline have the greatest impact on performance, we also evaluate intermediate versions of the model on MATH500 (Hendrycks et al., 2021) and AIME24 (MAA, 2024). The results of these evaluations are presented in Table 3. Each step of the training pipeline provides a boost to performance, with particularly large gains from fine-tuning on solutions from reasoning models (" + }, + { + "bbox": [ + 104, + 539, + 506, + 606 + ], + "type": "inline_equation", + "content": "+29\\%" + }, + { + "bbox": [ + 104, + 539, + 506, + 606 + ], + "type": "text", + "content": " on MATH500 and " + }, + { + "bbox": [ + 104, + 539, + 506, + 606 + ], + "type": "inline_equation", + "content": "+17\\%" + }, + { + "bbox": [ + 104, + 539, + 506, + 606 + ], + "type": "text", + "content": " on AIME24)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 610, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 610, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 610, + 506, + 733 + ], + "type": "text", + "content": "Direct Distillation from Reasoning Models We also attempted to distill from Deepseek-R1-Qwen-1.5B instead of Llama-3.2-3B. 
In this case, we did not SFT on OpenMathInstruct, and instead only SFT on the 10B reasoning data that we collected after distillation. We found that the distilled model's performance was poor (38% and 3.3% pass@1 accuracy on MATH500 and AIME24, respectively). Our hypothesis for why this occurs is that 10B tokens is insufficient to effectively transfer reasoning skills from the transformer to Mamba. Although curating a high-quality reasoning dataset demands significant time and effort, we begin by leveraging the standard MATH distillation dataset from OpenMathInstruct (Toshniwal et al., 2024) to first distill a strong MATH model. We then transform this MATH model into a reasoning model via SFT on the dedicated reasoning dataset. This approach achieves strong performance with a much smaller number of reasoning tokens." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 189, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 189, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 189, + 94 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 106, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 217 + ], + "type": "text", + "content": "In this paper, we introduced M1, a hybrid reasoning model built on the Mamba architecture, designed to address the scalability challenges of the Transformer models. 
We demonstrated effective techniques for distillation and finetuning to develop M1, which achieves mathematical reasoning performance comparable to state-of-the-art reasoning models of similar size. Notably, M1 delivers over 3x faster inference than similar-sized Transformer models, even when using the heavily optimized vLLM inference engine, particularly at large batch sizes. This improved efficiency can make the resource-intensive inference-time strategies, such as self-consistency, more practical. Our findings establish M1 as a strong alternative to Transformer-based architectures, paving the way for more efficient and high-performing reasoning models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 233, + 168, + 246 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 168, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 168, + 246 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 251, + 507, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 251, + 505, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 251, + 505, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 251, + 505, + 275 + ], + "type": "text", + "content": "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 282, + 506, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 282, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 106, + 282, + 506, + 328 + ], + "type": "text", + "content": "Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebron, and Sumit Sanghai. Gqa: Training generalized multi-query transformer models from multi-head checkpoints. 
In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 4895-4901, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 335, + 507, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 335, + 507, + 370 + ], + "spans": [ + { + "bbox": [ + 106, + 335, + 507, + 370 + ], + "type": "text", + "content": "Maximilian Beck, Korbinian Poppel, Markus Spanring, Andreas Auer, Oleksandra Prudnikova, Michael Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. xlstm: Extended long short-term memory, 2024. URL https://arxiv.org/abs/2405.04517." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 376, + 505, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 376, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 106, + 376, + 505, + 410 + ], + "type": "text", + "content": "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 417, + 505, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 417, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 106, + 417, + 505, + 451 + ], + "type": "text", + "content": "Aviv Bick, Kevin Y. Li, Eric P. Xing, J. Zico Kolter, and Albert Gu. Transformers to ssms: Distilling quadratic knowledge to subquadratic models, 2024. URL https://arxiv.org/abs/2408.10189." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 459, + 505, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 459, + 505, + 494 + ], + "spans": [ + { + "bbox": [ + 106, + 459, + 505, + 494 + ], + "type": "text", + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. 
Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 499, + 506, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 499, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 106, + 499, + 506, + 556 + ], + "type": "text", + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, and et. al. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 563, + 506, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 563, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 106, + 563, + 506, + 608 + ], + "type": "text", + "content": "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 615, + 505, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 615, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 615, + 505, + 660 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 668, + 506, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 668, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 106, + 668, + 506, + 702 + ], + "type": "text", + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 708, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 708, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 708, + 504, + 733 + ], + "type": "text", + "content": "Tri Dao and Albert Gu. Transformers are ssms: Generalized models and efficient algorithms through structured state space duality, 2024. URL https://arxiv.org/abs/2405.21060." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 733 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 507, + 138 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 507, + 138 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, and et. al. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 144, + 507, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 507, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 507, + 178 + ], + "type": "text", + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 184, + 505, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 184, + 505, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 184, + 505, + 218 + ], + "type": "text", + "content": "Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens, 2024. URL https://arxiv.org/abs/2310.02226." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 224, + 507, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 507, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 507, + 281 + ], + "type": "text", + "content": "Aaron Grattaftiori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, and et. al. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 287, + 507, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 507, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 507, + 311 + ], + "type": "text", + "content": "Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces, 2024. URL https://arxiv.org/abs/2312.00752." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 316, + 505, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 505, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 505, + 340 + ], + "type": "text", + "content": "Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces, 2022. URL https://arxiv.org/abs/2111.00396." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 346, + 505, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 505, + 370 + ], + "type": "text", + "content": "Yuxian Gu, Li Dong, Furu Wei, and Minlie Huang. Minillm: Knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2306.08543." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 375, + 507, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 375, + 507, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 375, + 507, + 409 + ], + "type": "text", + "content": "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 415, + 507, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 507, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 507, + 460 + ], + "type": "text", + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 467, + 507, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 467, + 507, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 507, + 501 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 507, + 507, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 507, + 507, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 507, + 507, + 531 + ], + "type": "text", + "content": "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015. URL https://arxiv.org/abs/1503.02531." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 536, + 507, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 507, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 507, + 571 + ], + "type": "text", + "content": "Jungo Kasai, Hao Peng, Yizhe Zhang, Dani Yogatama, Gabriel Ilharco, Nikolaos Pappas, Yi Mao, Weizhu Chen, and Noah A. Smith. Finetuning pretrained transformers into mns, 2021. URL https://arxiv.org/abs/2103.13076." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 576, + 507, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 507, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 507, + 611 + ], + "type": "text", + "content": "Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention, 2020. URL https:// arxiv.org/abs/2006.16236." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 616, + 507, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 507, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 507, + 663 + ], + "type": "text", + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 669, + 507, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 507, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 507, + 703 + ], + "type": "text", + "content": "Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 708, + 507, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 507, + 733 + ], + "type": "text", + "content": "Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks, 2025. URL https://arxiv.org/abs/2501.10069." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 611, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 507, + 139 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 507, + 139 + ], + "type": "text", + "content": "Opher Lieber, Barak Lenz, Hofit Bata, Gal Cohen, Jhonathan Osin, Itay Dalmedigos, Erez Safahi, Shaked Meirom, Yonatan Belinkov, Shai Shalev-Shwartz, Omri Abend, Raz Alon, Tomer Asida, Amir Bergman, Roman Glozman, Michael Gokhman, Avashalom Manevich, Nir Ratner, Noam Rozen, Erez Shwartz, Mor Zusman, and Yoav Shoham. Jamba: A hybrid transformer-mamba language model, 2024. URL https://arxiv.org/abs/2403.19887." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 144, + 507, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 144, + 507, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 507, + 177 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 183, + 507, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 183, + 507, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 183, + 507, + 228 + ], + "type": "text", + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision, 2024. URL https://arxiv.org/abs/2406.06592." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 234, + 611, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 234, + 611, + 302 + ], + "spans": [ + { + "bbox": [ + 106, + 234, + 611, + 302 + ], + "type": "text", + "content": "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaler: Surpassing o1-preview with a 1.5b model by scaling rl. https://pretty-radio-b75.notion.site/ DeepScaleR-Surpassing-01-Preview-with-a-1-5B-Model-by-Scaling-RL-19681902c1468005bed8ca303013a4e2, 2025. Notion Blog." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 307, + 507, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 307, + 507, + 341 + ], + "spans": [ + { + "bbox": [ + 106, + 307, + 507, + 341 + ], + "type": "text", + "content": "MAA. American invitational mathematics examination 2023, 2023. URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 346, + 507, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 346, + 507, + 380 + ], + "spans": [ + { + "bbox": [ + 106, + 346, + 507, + 380 + ], + "type": "text", + "content": "MAA. 
American invitationalal mathematics examination 2024, 2024. URL https://artofproblemsolving.com/wiki/index.php/American_Invitationalal_Mathematics_Examination?srltid=AfmBOoqiDCiaGTLQrsRTKsZui8RFnj0ZqM4qIqY3yGB3sBaqOaxwf_Xt." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 386, + 507, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 386, + 507, + 420 + ], + "spans": [ + { + "bbox": [ + 106, + 386, + 507, + 420 + ], + "type": "text", + "content": "MAA. American invitational mathematics examination 2025, 2025. URL https://artofproblemsolving.com/wiki/index.php/American_Invitational_Mathematics_Examination?srltid=AfmB0oqiDCiaGTLQrsRTKsZui8RFnjOZqM4qIqY3yGB3sBaqOaxwf_Xt." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 426, + 507, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 426, + 507, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 507, + 460 + ], + "type": "text", + "content": "Jean Mercat, Igor Vasiljevic, Sedrick Keh, Kushal Arora, Achal Dave, Adrien Gaidon, and Thomas Kollar. Linearizing large language models, 2024. URL https://arxiv.org/abs/2405.06640." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 465, + 507, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 465, + 507, + 500 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 507, + 500 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candes, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 506, + 507, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 506, + 507, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 507, + 540 + ], + "type": "text", + "content": "Daniele Paliotta, Junxiong Wang, Matteo Pagliardini, Kevin Y Li, Aviv Bick, J Zico Kolter, Albert Gu, François Fleuret, and Tri Dao. Thinking slow, fast: Scaling inference compute with distilled reasoners. arXiv preprint arXiv:2502.20339, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 544, + 507, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 544, + 507, + 601 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 507, + 601 + ], + "type": "text", + "content": "Bo Peng, Eric Alcaide, Quentin Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Chung, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Jiaju Lin, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, and et. al. Rwkv: Reinventing rnns for the transformer era, 2023. URL https://arxiv.org/abs/2305.13048." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 607, + 507, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 607, + 507, + 640 + ], + "spans": [ + { + "bbox": [ + 106, + 607, + 507, + 640 + ], + "type": "text", + "content": "Jacob Pfau, William Merrill, and Samuel R. Bowman. Let's think dot by dot: Hidden computation in transformer language models, 2024. URL https://arxiv.org/abs/2404.15758." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 647, + 507, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 507, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 507, + 680 + ], + "type": "text", + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. 
Mutual reasoning makes smaller llms stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 686, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 686, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 686, + 507, + 732 + ], + "type": "text", + "content": "Qwen,:, An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, and et. al. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 505, + 117 + ], + "type": "text", + "content": "Tokiniaina Raharison Ralambomihanta, Shahrad Mohammadzadeh, Mohammad Sami Nur Islam, Wassim Jabbour, and Laurence Liang. Scavenging hyena: Distilling transformers into long convolution models, 2024. URL https://arxiv.org/abs/2401.17574." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 124, + 506, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 124, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 506, + 159 + ], + "type": "text", + "content": "Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. Samba: Simple hybrid state space models for efficient unlimited context language modeling, 2024. URL https://arxiv.org/abs/2406.07522." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 166, + 504, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 166, + 504, + 191 + ], + "spans": [ + { + "bbox": [ + 106, + 166, + 504, + 191 + ], + "type": "text", + "content": "Matthew Renze and Erhan Guven. The effect of sampling temperature on problem solving in large language models, 2024. URL https://arxiv.org/abs/2402.05201." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 198, + 505, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 198, + 505, + 233 + ], + "spans": [ + { + "bbox": [ + 106, + 198, + 505, + 233 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 240, + 505, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 240, + 505, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 240, + 505, + 275 + ], + "type": "text", + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 282, + 505, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 282, + 505, + 317 + ], + "spans": [ + { + "bbox": [ + 106, + 282, + 505, + 317 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https:// arxiv.org/abs/2408.03314." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 325, + 505, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 325, + 505, + 360 + ], + "spans": [ + { + "bbox": [ + 106, + 325, + 505, + 360 + ], + "type": "text", + "content": "Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data, 2024. URL https://arxiv.org/abs/2410.01560." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 368, + 505, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 368, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 106, + 368, + 505, + 403 + ], + "type": "text", + "content": "Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback, 2022. URL https://arxiv.org/abs/2211.14275." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 410, + 505, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 410, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 106, + 410, + 505, + 445 + ], + "type": "text", + "content": "Junxiong Wang, Daniele Paliotta, Avner May, Alexander Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. Advances in Neural Information Processing Systems, 37:62432-62457, 2024a." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 453, + 505, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 453, + 505, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 453, + 505, + 487 + ], + "type": "text", + "content": "Junxiong Wang, Daniele Paliotta, Avner May, Alexander M. Rush, and Tri Dao. The mamba in the llama: Distilling and accelerating hybrid models. arXiv preprint arXiv:2408.15237, 2024b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 495, + 505, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 495, + 505, + 530 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 505, + 530 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 536, + 505, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 536, + 505, + 583 + ], + "spans": [ + { + "bbox": [ + 106, + 536, + 505, + 583 + ], + "type": "text", + "content": "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models, 2022. URL https://arxiv.org/abs/2206.07682." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 590, + 505, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 590, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 106, + 590, + 505, + 626 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. 
Chain-of-thought prompting elicits reasoning in large language models, 2023. URL https://arxiv.org/abs/2201.11903." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 633, + 505, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 633, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 106, + 633, + 505, + 668 + ], + "type": "text", + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models, 2024. URL https://arxiv.org/abs/2408.00724." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 675, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 675, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 675, + 505, + 732 + ], + "type": "text", + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 505, + 512 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 106, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 505, + 116 + ], + "type": "text", + "content": "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 121, + 505, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 121, + 505, + 155 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 505, + 155 + ], + "type": "text", + "content": "Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. Gated linear attention transformers with hardware-efficient training, 2024. URL https://arxiv.org/abs/2312.06635." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 160, + 505, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 160, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 106, + 160, + 505, + 185 + ], + "type": "text", + "content": "Wang Yang, Hongye Jin, Jingfeng Yang, Vipin Chaudhary, and Xiaotian Han. Thinking preference optimization, 2025. URL https://arxiv.org/abs/2502.13173." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 190, + 505, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 190, + 505, + 224 + ], + "spans": [ + { + "bbox": [ + 106, + 190, + 505, + 224 + ], + "type": "text", + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models, 2023. URL https://arxiv.org/abs/2305.10601." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 229, + 505, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 229, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 229, + 505, + 264 + ], + "type": "text", + "content": "Jingyang Yuan, Huazuo Gao, Damai Dai, Junyu Luo, Liang Zhao, Zhengyan Zhang, Zhenda Xie, YX Wei, Lean Wang, Zhiping Xiao, et al. Native sparse attention: Hardware-aligned and natively trainable sparse attention. arXiv preprint arXiv:2502.11089, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 269, + 505, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 269, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 106, + 269, + 505, + 304 + ], + "type": "text", + "content": "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025. Notion Blog." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 308, + 505, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 308, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 106, + 308, + 505, + 342 + ], + "type": "text", + "content": "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Restmcts*: Llm self-training via process reward guided tree search, 2024a. 
URL https://arxiv.org/abs/2406.03816." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 348, + 505, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 348, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 505, + 392 + ], + "type": "text", + "content": "Michael Zhang, Simran Arora, Rahul Chalamala, Benjamin Frederick Spector, Alan Wu, Krithik Ramesh, Aaryan Singhal, and Christopher Re. Lolcats: On low-rank linearizing of large language models. In The Thirteenth International Conference on Learning Representations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 399, + 505, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 399, + 505, + 433 + ], + "spans": [ + { + "bbox": [ + 106, + 399, + 505, + 433 + ], + "type": "text", + "content": "Michael Zhang, Kush Bhatia, Hermann Kumbong, and Christopher Ré. The hedgehog & the porcupine: Expressive linear attentions with softmax mimicry, 2024b. URL https://arxiv.org/abs/2402.04347." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 438, + 505, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 438, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 106, + 438, + 505, + 472 + ], + "type": "text", + "content": "Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B. Tenenbaum, and Chuang Gan. Planning with large language models for code generation, 2023. URL https://arxiv.org/abs/2303.05510." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 478, + 505, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 478, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 106, + 478, + 505, + 512 + ], + "type": "text", + "content": "Xuan Zhang, Fengzhuo Zhang, Cunxiao Du, Chao Du, Tianyu Pang, Wei Gao, and Min Lin. Lighttransfer: Your long-context llm is secretly a hybrid model with effortless adaptation. arXiv preprint arXiv:2410.13846, 2024c." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 106, + 531, + 287, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 531, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 106, + 531, + 287, + 544 + ], + "type": "text", + "content": "A Limitations and Future Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 555, + 504, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 504, + 625 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 504, + 625 + ], + "type": "text", + "content": "Speedup. Our current hybrid model is only " + }, + { + "bbox": [ + 105, + 555, + 504, + 625 + ], + "type": "inline_equation", + "content": "3 \\times" + }, + { + "bbox": [ + 105, + 555, + 504, + 625 + ], + "type": "text", + "content": " faster than a Transformer of the same size when serving inference with vLLM. Recently, NVIDIA introduced a new hybrid Mamba kernel7, which could further boost the speed of hybrid models. Additionally, our attention implementation in hybrid models does not yet leverage the optimizations available in vLLM. Integrating M1 into vLLM could further boost performance by taking advantage of these attention speedups." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 635, + 504, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 504, + 714 + ], + "type": "text", + "content": "Why do we not distill Qwen2.5 1.5B MATH model. We considered using the Qwen2.5 1.5B MATH Instruct model as the distillation target in the first stage. However, we found that the cross entropy loss of the Qwen 1.5B MATH model on the OpenMATH Instruct dataset (Toshniwal et al., 2024) exceeded 1.8, which is much higher than that of the Llama models (0.5). 
This suggests that, to mimic the Qwen2.5 model, we need a dataset generated from a large Qwen2.5 series model rather than this one generated from the Llama models. Dataset curation from Qwen Math models goes beyond the scope of this work." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 720, + 505, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 505, + 731 + ], + "type": "text", + "content": "7https://github.com/NVIDIA/Megatron-LM/commit/b957578e76a921209ef873cbbd389114a4042542" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 258 + ], + "type": "text", + "content": "Improvement on RL training speed. Recently, DeepSeek R1 (DeepSeek-AI et al., 2025) showed that reinforcement learning (RL) is a key component in improving model reasoning performance during post-training. Since then, recent research has predominantly relied on reinforcement learning (RL) as a training paradigm for reasoning models. However, training with RL requires the efficient generation of long sequences. For example, in VeRL (Sheng et al., 2024), the typical training batch size ranges from a few thousand to several thousand. 
DeepscaleR (Luo et al., 2025) also shows a significant accuracy boost when training RL with longer sequences, as it tends to enhance model performance by providing more steps for thorough reasoning. However, this shift towards reinforcement learning has resulted in the generation process becoming a significant bottleneck in reasoning model training, taking more than three times as long as the actor's weight update (forward + backward) according to the time profiling done for DeepscaleR (Luo et al., 2025). This need for efficient generation in RL presents a significant challenge for transformer models, namely due to the heavy computational burden imposed by large key-value caches during generation, especially for large batch sizes. Given their generation speed advantages, linear RNN models may be better suited for scaling RL training." + } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_content_list.json b/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ae9ead81d686b9b8741b98a035fba1c25536bb2b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_content_list.json @@ -0,0 +1,1219 @@ +[ + { + "type": "text", + "text": "GUI-R1: A Generalist R1-Style Vision-Language Action Model For GUI Agents", + "text_level": 1, + "bbox": [ + 202, + 122, + 795, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Run Luo 
$^{1,2}$ Lu Wang $^{3}$ Wanwei He $^{1,2}$ Longze Chen $^{1,2}$ Jiaming Li $^{1,2}$", + "bbox": [ + 243, + 223, + 750, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Min Yang $^{1,2}$ Xiaobo Xia $^{3}$", + "bbox": [ + 408, + 239, + 589, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences", + "bbox": [ + 272, + 255, + 725, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ University of Chinese Academy of Sciences", + "bbox": [ + 364, + 268, + 632, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ National University of Singapore", + "bbox": [ + 397, + 282, + 601, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$\\left\\{\\mathrm{R.LUO@SIAT.AC.CN}\\right.$ M.YANG@SIAT.AC.CN XIAOBOXIA.UNI@GMAIL.COM}", + "bbox": [ + 251, + 296, + 745, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 345, + 537, + 361 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing efforts in building graphical user interface (GUI) agents largely rely on the training paradigm of supervised fine-tuning (SFT) on large vision-language models (LVLMs). However, this approach not only demands extensive amounts of training data but also struggles to effectively understand GUI screenshots and generalize to unseen interfaces. The issue significantly limits its application in real-world scenarios, especially for high-level tasks. Inspired by reinforcement fine-tuning (RFT) in large reasoning models (e.g., DeepSeek-R1), which efficiently enhances the problem-solving capabilities of large language models in real-world settings, we propose GUI-R1, the first reinforcement learning framework designed to enhance the GUI capabilities of LVLMs in high-level real-world task scenarios, through unified action space rule modeling. 
By leveraging a small amount of carefully curated high-quality data across multiple platforms (including Windows, Linux, MacOS, Android, and Web) and employing policy optimization algorithms such as group relative policy optimization (GRPO) to update the model, GUI-R1 achieves superior performance using only $0.02\\%$ of the data (3K vs. 13M) compared to previous state-of-the-art methods like OS-Atlas across eight benchmarks spanning three different platforms (mobile, desktop, and web). These results demonstrate the immense potential of reinforcement learning based on unified action space rule modeling in improving the execution capabilities of LVLMs for real-world GUI agent tasks. The codebase is available at https://github.com/ritzzz-ai/GUI-R1.git.", + "bbox": [ + 228, + 376, + 766, + 667 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 696, + 313, + 713 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent studies [1; 2; 3] have explored the use of large vision-language models (LVLMs) [4] to develop graphical user interface (GUI) agents capable of performing high-level complex tasks. These agents analyze the screen as a self-contained source of information for decision-making, without relying on environment-based textual descriptions such as HTML or accessibility trees. This approach offers greater flexibility in agent decision-making. However, previous works have predominantly relied on the training paradigm of supervised fine-tuning (SFT), which not only requires large amounts of high-quality training data but also struggles to effectively comprehend GUI screenshots and generalize to unseen interfaces. 
These limitations have significantly hindered the real-world applicability of these works, particularly for high-level GUI tasks that lack explicit step-by-step instructions.", + "bbox": [ + 169, + 728, + 823, + 854 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rule-based reinforcement fine-tuning has recently emerged as an efficient and scalable alternative to SFT, requiring only a small number of examples to fine-tune models effectively while demonstrating strong performance and generalization capabilities in domain-specific tasks. RFT has been increas", + "bbox": [ + 169, + 859, + 823, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10458v4 [cs.CV] 1 Oct 2025", + "bbox": [ + 22, + 282, + 57, + 710 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Technical report.", + "bbox": [ + 171, + 922, + 274, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a343d8658955f5376bd79dff57d34f2952e7c91622500bc8d7130f3f5cedf694.jpg", + "image_caption": [ + "(a) Grounding capability." + ], + "image_footnote": [], + "bbox": [ + 174, + 102, + 385, + 251 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/512270404c63b21f462da324a0f27a925ac93742bbd95f28afe9e2cfeb780bbe.jpg", + "image_caption": [ + "(b) Low-level task capability." + ], + "image_footnote": [], + "bbox": [ + 393, + 95, + 604, + 252 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c6b9af241d50e031e4855976a092383154a95e81b4c574bc8196fcbcc277f33b.jpg", + "image_caption": [ + "(c) High-level task capability.", + "Figure 1: GUI-R1 achieves the best performance on eight evaluation datasets covering various platforms and task granularities, demonstrating the promising potential of RFT in GUI agent tasks." + ], + "image_footnote": [], + "bbox": [ + 614, + 90, + 823, + 250 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ingly adopted for developing various LVLMs [5; 6; 7; 8; 9]. 
Inspired by these advancements, this study extends the rule-based reinforcement learning (RL) paradigm to the domain of GUI agents, which focuses on GUI action prediction tasks within a unified action space driven by high-level instructions. Specifically, LVLMs generate multiple responses (trajectories) for each input, containing both reasoning traces and final answers. These responses are evaluated using a unified action space reward function designed in this work, and the model is updated through policy optimization [10]. This iterative self-learning process enhances the model's reasoning capabilities in action prediction and its generalization to out-of-distribution (OOD) scenarios. By modeling a unified action space, we efficiently curate high-quality data spanning multiple platforms, including Windows, Linux, MacOS, Android, and Web, while avoiding action prediction conflicts across different platforms.", + "bbox": [ + 169, + 332, + 826, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As demonstrated in Figure 1, the proposed framework (GUI-R1) achieves superior performance using only $0.02\\%$ of the data (3K vs. 13M) compared to previous state-of-the-art methods like OSAtlas [1] across eight benchmarks covering three different platforms (mobile, desktop, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks). Before delving into details, we clearly emphasize our contribution as follows.", + "bbox": [ + 169, + 477, + 823, + 547 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose GUI-R1, the first framework that utilizes rule-based reinforcement fine-tuning to enhance the reasoning capabilities of LVLMs in high-level GUI action prediction tasks.", + "- We design a rule-based unified action space reward function, which efficiently validates GUI task responses across different platforms and task granularities. 
This ensures reliable and efficient data selection and model training.", + "- Leveraging the rule-based unified action space reward function, we construct GUI-R1-3K, which is a high-quality fine-tuning dataset with diversity and complexity. This dataset significantly improves both training efficiency and model performance.", + "- We conduct a comprehensive evaluation of GUI agents, covering three distinct platforms (desktop, mobile, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks) across eight benchmarks. Experimental results demonstrate that our GUI-R1 is leading in multiple realistic cases. This creates a strong baseline of GUI agents for future research." + ], + "bbox": [ + 215, + 556, + 823, + 752 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 771, + 321, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 GUI Agents", + "text_level": 1, + "bbox": [ + 171, + 801, + 295, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Autonomous agents driven by large foundation models (e.g., large language models (LLMs) and large vision-language models (LVLMs)) have gained significant attention for their powerful interactive capabilities [11]. These operating systems via programs or API calls [12; 13]. However, the closed-source nature of most commercial software limits access to internal APIs or code, which promotes a shift in research toward GUI agents. Different from traditional programmatic agents, GUI agents simulate human interactions via mouse and keyboard inputs, which enable broader flexibility", + "bbox": [ + 169, + 827, + 823, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "in solving complex tasks. Recent works have advanced this direction. 
For instance, UGround [14] developed a specialized GUI grounding model for precise GUI element localization. OS-Atlas [1] introduced large action models to handle general agent tasks by interpreting human intentions and predicting actions in the form of function calls. UITars [2] proposed a more comprehensive method by combining GUI-related pretraining with task-level reasoning fine-tuning to better capture the complexity of GUI interactions. Nevertheless, these methods all rely on the paradigm of supervised fine-tuning (SFT), which suffers from two main limitations: (1) the training process requires vast amounts of diverse data; (2) the models exhibit limited generalization capabilities, which struggle to understand GUI screenshots and adapt to unseen interfaces. These limitations motivate the development of a more advanced learning paradigm for GUI agents beyond traditional SFT methods.", + "bbox": [ + 169, + 90, + 823, + 229 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Reinforcement Fine-Tuning", + "text_level": 1, + "bbox": [ + 171, + 246, + 405, + 262 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Rule-based reinforcement fine-tuning, exemplified by OpenAI o1 [15] and DeepSeek-R1 [10], has demonstrated strong performance in mathematical reasoning [16], code generation [17], and multi-step logic tasks [18]. Subsequent studies have extended this paradigm to multimodal models by designing task-specific reward functions for vision-based tasks, such as correct class prediction in image classificati [19; 7; 20], intersection-over-union (IoU) metrics in image localization and detection [6; 5], and accurate click position prediction in low-level GUI grounding tasks [9]. These works demonstrate that verifiable reward signals, e.g., symbolic correctness or execution-based feedback, can effectively substitute traditional supervision. 
Despite the strong potential of RFT in various tasks, it remains underexplored in complex high-level GUI agent tasks. Compared to other domains, building intelligent agents for high-level GUI tasks is particularly challenging due to diverse UI layouts, implicit task semantics, and long-horizon action dependencies. This imposes higher demands on the model's contextual learning and understanding capabilities. To the best of our knowledge, GUI-R1 is the first RFT-based framework specifically designed for high-level GUI agents.", + "bbox": [ + 169, + 272, + 823, + 452 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 GUI-R1 Framework", + "text_level": 1, + "bbox": [ + 171, + 472, + 377, + 488 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9fc200b1f5a26e06c697f75304bb82ca2b26802b7fc14558a867a6658bcecfc3.jpg", + "image_caption": [ + "Figure 2: Overview of the GUI-R1 Framework. Given the high-level instruction, action history, and visual image inputs, the policy model generates multiple responses containing reasoning steps. Then the verifiable rewards, such as action type reward, click point reward, and input text reward, are used with the policy gradient optimization algorithm to update the policy model." + ], + "image_footnote": [], + "bbox": [ + 179, + 510, + 816, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "GUI-R1 is based on a reinforcement learning training paradigm designed to enhance the ability of GUI agents to complete sophisticated instructional tasks. As shown in Figure 2, unlike low-level tasks, high-level GUI tasks lack explicit and fine-grained instructions, which require action predictions based on high-level task objectives and execution history. 
This imposes greater demands on the model's contextual learning and understanding capabilities.", + "bbox": [ + 169, + 770, + 823, + 842 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Preliminaries", + "text_level": 1, + "bbox": [ + 171, + 857, + 307, + 871 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We define the goal of GUI agents in high-level instructional tasks as understanding and executing low-level instructions to complete the high-level task $Q$ , based on the current interface image $I$", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and the execution history $H$ . Formally, given the input $Q$ , $I$ , and $H$ , the model generates a set of candidate responses $O = \\{o_1, o_2, \\dots, o_N\\}$ , where each response contains attributes of the predicted low-level action $o^{\\mathrm{act}}$ , input text $o^{\\mathrm{text}}$ , and input point $o^{\\mathrm{point}}$ . Each response is evaluated using a unified action space reward function to compute its reward $\\{r_1, r_2, \\dots, r_N\\}$ . GRPO [10] is applied to estimate advantages and update the policy model under KL divergence constraints. 
The relative advantage $A_i$ of the $i$ -th response is calculated as follows:",
"bbox": [
169,
90,
823,
175
],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nA_{i} = \\frac{r_{i} - \\operatorname{mean}(\\{r_{1}, r_{2}, \\ldots, r_{N}\\})}{\\operatorname{std}(\\{r_{1}, r_{2}, \\ldots, r_{N}\\})},\n$$\n",
"text_format": "latex",
"bbox": [
377,
191,
617,
224
],
"page_idx": 3
},
{
"type": "text",
"text": "where mean and std denote the mean and standard deviation of the rewards, respectively.",
"bbox": [
171,
232,
759,
247
],
"page_idx": 3
},
{
"type": "text",
"text": "3.2 Verifiable Rewards in Unified Action Space",
"text_level": 1,
"bbox": [
171,
261,
514,
277
],
"page_idx": 3
},
{
"type": "text",
"text": "We adopt a unified action space modeling strategy, which extracts action space categories across different platforms and integrates them into a unified action space. This ensures that all high-level instructions can be decomposed into a sequence of atomic actions, resolving action space conflicts in multi-platform data joint training. Based on the unified action space, we design verifiable reward functions to evaluate the accuracy of predicted actions to guide reinforcement learning. We detail these verifiable rewards below.",
"bbox": [
169,
287,
823,
369
],
"page_idx": 3
},
{
"type": "text",
"text": "Format reward. Following previous work [20; 10; 6], we introduce format rewards during training to evaluate whether the generated output adheres to the expected structural format, including both syntactic and semantic validity. Specifically, format rewards guide the model to generate reasoning processes and final answers in a structured format, which play a critical role in self-learning and iterative improvement during reinforcement fine-tuning. 
The format reward templates used in training and inference are as follows, where ‘<think></think>’ represents the reasoning process and ‘<answer></answer>’ represents the final answer.",
"bbox": [
169,
377,
823,
473
],
"page_idx": 3
},
{
"type": "text",
"text": "Unified Action Space Prompt for Task Training and Inference",
"text_level": 1,
"bbox": [
204,
484,
609,
498
],
"page_idx": 3
},
{
"type": "text",
"text": "You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot $<$ image $>$ , I want you to continue executing the command task, with the action history being history. Please provide the action to perform (Enumerate from [complete, close/delete, press_home, click, press_back, type, select, scroll, enter]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action.",
"bbox": [
196,
505,
800,
587
],
"page_idx": 3
},
{
"type": "text",
"text": "Output the thinking process in <think></think> tags, and the final answer in <answer></answer> tags as follows: <think>...</think><answer>['action': enum[complete, close/delete, press_home, click, press_back, type, select, scroll, enter], 'point': [x, y], 'input_text': 'no input text [default]']</answer>.",
"bbox": [
196,
588,
800,
659
],
"page_idx": 3
},
{
"type": "text",
"text": "Unified Action Space Prompt for Grounding Training and Inference",
"text_level": 1,
"bbox": [
204,
676,
651,
691
],
"page_idx": 3
},
{
"type": "text",
"text": "You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot $<$ image $>$ , I want you to continue executing the command task, with the action history being history. 
Please provide the action to perform (Enumerate from [click]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action.",
"bbox": [
196,
698,
800,
767
],
"page_idx": 3
},
{
"type": "text",
"text": "Output the thinking process in <think></think> tags, and the final answer in <answer></answer> tags as follows: <think>...</think><answer>[‘action’: enum[click], ‘point’: [x, y], ‘input_text’: ‘no input text [default]’]</answer>.",
"bbox": [
196,
768,
800,
825
],
"page_idx": 3
},
{
"type": "text",
"text": "Accuracy rewards. For the model's predicted output $o = \\{o^{\\mathrm{act}}, o^{\\mathrm{text}}, o^{\\mathrm{point}}\\}$ , which consists of three components: $o^{\\mathrm{act}}$ (action type, e.g., click, scroll), $o^{\\mathrm{point}}$ (click point position), and $o^{\\mathrm{text}}$ (input text), we define the accuracy reward $R_{\\mathrm{acc}}$ as a combination of action type reward $R_{\\mathrm{act}}$ , click point reward $R_{\\mathrm{point}}$ , and input text reward $R_{\\mathrm{text}}$ , i.e., $R_{\\mathrm{acc}} = R_{\\mathrm{act}} + R_{\\mathrm{point}} + R_{\\mathrm{text}}$ . This design provides reliable correctness rewards for all actions.",
"bbox": [
169,
840,
823,
910
],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [
493,
935,
504,
946
],
"page_idx": 3
},
{
"type": "text",
"text": "Action type reward. The action type reward $R_{\\mathrm{act}}$ is calculated by comparing the predicted action type $o^{\\mathrm{act}}$ with the ground truth action type $gt^{\\mathrm{act}}$ . If $o^{\\mathrm{act}} == gt^{\\mathrm{act}}$ , the reward is 1; otherwise, it is 0. This simple yet effective evaluation mechanism guides action type prediction.",
"bbox": [
169,
90,
823,
133
],
"page_idx": 4
},
{
"type": "text",
"text": "Click point reward. 
The click point reward $R_{\\mathrm{point}}$ is calculated by comparing the predicted click point $o^{\\mathrm{point}} = [x, y]$ with the ground truth bounding box $gt^{\\mathrm{bbox}} = [x_1, y_1, x_2, y_2]$ . The calculation formula is as follows:",
"bbox": [
169,
138,
825,
183
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\nR_{\\text{point}} = \\left\\{ \\begin{array}{ll} 1 & \\text{if } o^{\\text{point}} \\in gt^{\\text{bbox}}, \\\\ 0 & \\text{otherwise}. \\end{array} \\right.\n$$\n",
"text_format": "latex",
"bbox": [
392,
205,
602,
239
],
"page_idx": 4
},
{
"type": "text",
"text": "Input text reward. The input text reward $R_{\\mathrm{text}}$ is calculated by comparing the predicted input text $o^{\\mathrm{text}}$ with the ground truth text parameter $gt^{\\mathrm{text}}$ using the semantic $F_1$ score. The calculation formula is as follows:",
"bbox": [
169,
255,
823,
297
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\nR_{\\text{text}} = \\left\\{ \\begin{array}{ll} 1 & \\text{if } F_{1}(o^{\\text{text}}, gt^{\\text{text}}) > 0.5, \\\\ 0 & \\text{otherwise.} \\end{array} \\right.\n$$\n",
"text_format": "latex",
"bbox": [
370,
320,
622,
354
],
"page_idx": 4
},
{
"type": "text",
"text": "Response reward. The final response reward is composed of format rewards and accuracy rewards, defined as: $R_{o} = \\alpha R_{\\mathrm{f}} + \\beta R_{\\mathrm{acc}}$ , where $R_{\\mathrm{f}}$ represents the format reward, $R_{\\mathrm{acc}}$ represents the accuracy reward, and $\\alpha$ and $\\beta$ are weighting parameters respectively.",
"bbox": [
169,
369,
823,
412
],
"page_idx": 4
},
{
"type": "text",
"text": "3.3 Training Data Curation",
"text_level": 1,
"bbox": [
171,
431,
377,
448
],
"page_idx": 4
},
{
"type": "text",
"text": "Data collection. 
We collect data related to GUI tasks from multiple open-source datasets, including FineWeb [21], UIBert [22], AMEX [23], RICOSCA [24], as well as portions of pretraining data from Seeclick [3] and OS-Atlas [1]. This leads to $\\sim 14\\mathrm{M}$ examples of grounding and low-level task data. Additionally, we collect $\\sim 30\\mathrm{K}$ high-level GUI data points from OS-Atlas instruction datasets. In total, we gather $\\sim 14\\mathrm{M}$ examples spanning multiple platforms (including Windows, Linux, MacOS, Android, and Web) and various task granularities (grounding, low-level, and high-level).",
"bbox": [
169,
458,
823,
542
],
"page_idx": 4
},
{
"type": "text",
"text": "Data filtering. To filter out low-quality data for efficient RFT, we use the Qwen2.5VL-7B [4] model to generate 10 responses for each example and evaluate them using a rule-based reward function designed for unified action space modeling. We remove the problems with an estimated accuracy of 0 or 1 to ensure a stable training process, resulting in 140K low-level data and 1.5K high-level data. Since the quantity of low-level data far exceeds that of high-level data, we randomly sample 1.5K low-level data and combine it with all high-level data to create a balanced dataset of 3K high-quality training examples, named GUI-R1-3K. The distribution of image categories, action types, and corresponding difficulty levels is demonstrated in Figure 3.",
"bbox": [
169,
547,
825,
660
],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/742f4555af634ff81413e847bb29c4c5604960cfdf6bbadf2119d5b60dcd2306.jpg",
"image_caption": [
"(a) Image category quantity and difficulty distribution." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 676, + 488, + 838 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/eff777f438fcbcc5a1379031cf20f5447ef49173caac6a932d40410b6c6337c6.jpg", + "image_caption": [ + "(b) Action category quantity and difficulty distribution.", + "Figure 3: Illustrations of image and action category quantity and difficulty distributions in the dataset GUI-R1-3K." + ], + "image_footnote": [], + "bbox": [ + 504, + 676, + 821, + 839 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 89, + 313, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Implementation Details", + "text_level": 1, + "bbox": [ + 171, + 125, + 377, + 140 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training and inference details. For supervised fine-tuning (SFT), we use the QwenVL2.5-3B/7B [4] model as the base model for experiments and employ the LLaMA Factory [25] framework for one epoch of training to avoid overfitting. For RFT, we use the EasyR1 [26] framework for training over nine epochs. During inference, to ensure fairness, we apply a unified and simple prompt across all comparison methods, and conduct experiments under zero-shot prompt configurations. All experiments are conducted using $8 \\times$ NVIDIA A100-80G GPUs.", + "bbox": [ + 169, + 154, + 823, + 238 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation benchmarks. We evaluate our model on eight agent benchmarks on three different platforms, including AndroidControl-Low [27], AndroidControl-High [27], GUI-Odyssey [28], ScreenSpot [3], ScreenSpot-Pro [29], GUI-Act-Web [30], OmniAct-Web [31], and OmniAct-Desktop [31]. 
We only use the test splits of these benchmarks for evaluation.", + "bbox": [ + 169, + 243, + 823, + 300 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation metrics. Following Os-Atlas [1], we use three commonly adopted metrics for GUI agents in evaluation: action type prediction accuracy, click point prediction accuracy, and step success rate, denoted as Type, Grounding, and SR, respectively. In more detail, Type measures the exact match score between the predicted action types (e.g., 'click' and 'scroll') and the ground truth. Grounding evaluates the performance of GUI grounding in downstream tasks. Besides, SR represents the step-wise success rate, where a step is deemed successful only if both the predicted action and its associated arguments (e.g., point for click actions and input text for scroll actions) are correct.", + "bbox": [ + 169, + 305, + 825, + 404 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Experimental Results", + "text_level": 1, + "bbox": [ + 171, + 426, + 362, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We here evaluate our GUI-R1 model by comparing it with current state-of-the-art (SOTA) models on various tasks including GUI grounding tasks, GUI low-level tasks, and GUI high-level tasks.", + "bbox": [ + 169, + 455, + 823, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Grounding capability. We evaluate the grounding capability of GUI-R1 using ScreenSpot [3] and ScreenSpot-Pro [29]. 
ScreenSpot assesses GUI grounding performance across mobile, desktop, and web platforms, while ScreenSpot-Pro focuses on high-resolution professional environments, featuring expert-annotated tasks spanning 23 applications, five industries, and three operating systems.", + "bbox": [ + 169, + 489, + 823, + 547 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Table 1, compared to the previous SOTA model Os-Atlas-7B, which was trained with large-scale data using supervised fine-tuning (SFT), the RFT approach achieves superior performance on the 3B-sized Qwen2.5-VL model using only $0.2\\%$ of the data (3K vs. 14M). Furthermore, compared to the base models QwenVL2.5-3B/7B and the SFT-trained QwenVL2.5* 3B/7B models using the same dataset, the RFT-based GUI-R1 demonstrates significantly better performance in GUI grounding tasks. Moreover, at the 3B scale, GUI-R1 achieves substantial gains over SFT models on ScreenSpot (80.08 vs. 63.55) and ScreenSpot-Pro (25.23 vs. 13.80), representing improvements of $26.3\\%$ and $82.8\\%$ , respectively. This highlights the effectiveness of the RL training framework in leveraging small-scale datasets to achieve significant performance improvements, which demonstrates its potential as a data-efficient and scalable approach for model training in resource-constrained environments.", + "bbox": [ + 169, + 551, + 825, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Low-level task capability. We evaluate the low-level task execution capability of GUI-R1 using four benchmark datasets: AndroidControl-Low [27], GUI-Act-Web [29], OmniAct-Web, and OmniAct-Desktop [31]. AndroidControl-Low evaluates low-level task execution on mobile platforms, while GUI-Act-Web and OmniAct-Web focus on low-level task execution on web platforms. 
OmniAct-Desktop is used to test low-level task execution on desktop platforms.", + "bbox": [ + 169, + 710, + 823, + 781 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As demonstrated in Table 2, our method impressively improves the success rate of GUI low-level tasks for 3B and 7B models, with the average success rate increasing from 55.65 to 80.88 at the 3B scale. Compared to UI-R1 [9], which is concurrent work also trained using RFT, our model achieves a 10-point improvement at the 3B scale, validating that RL training focused on high-level tasks can further enhance the model's understanding of low-level instructions. Note that an interesting observation is that the use of small-scale SFT data even leads to performance degradation on some metrics such as GR on AndroidControl-Low. This limitation stems from SFT's reliance on task-specific labeled data, which constrains the model's ability to adapt to unseen environments. In contrast, our RFT method not only enhances out-of-distribution (OOD) generalization by optimizing", + "bbox": [ + 169, + 786, + 825, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/cafd0148da670145984d2db9becf6696e576a880ea6b7afab20c4ab996d6e76f.jpg", + "table_caption": [ + "Table 1: GUI grounding accuracy on ScreenSpot and ScreenSpot-Pro. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + ], + "table_footnote": [], + "table_body": "
ModelsScreenSpot-ProScreenSpot
DevCreativeCADScientificOfficeOSWebDesktop
TextIconTextIconTextIconTextIconTextIconTextIconTextIconText
Supervised Fine-Tuning
SeeClick0.60.01.00.02.50.03.50.01.10.02.80.055.732.572.230.0
Os-Atlas-4B7.10.03.01.42.00.09.05.55.13.85.60.082.663.172.145.7
ShowUI-2B16.91.49.10.02.50.013.27.315.37.510.32.2----
CogAgent-18B14.90.79.60.07.13.122.21.813.00.05.60.070.428.674.220.0
Aria-GUI16.20.023.72.17.61.627.16.420.31.94.70.0----
UGround-7B26.62.127.32.814.21.631.92.731.611.317.80.080.470.482.563.6
Claude**22.03.925.93.414.53.733.915.830.116.311.04.5----
Os-Atlas-7B33.11.428.82.812.24.737.57.333.95.727.14.590.874.291.762.8
QwenVL2.5-3B*20.31.824.62.811.24.739.56.428.65.717.82.273.048.585.746.2
QwenVL2.5-7B*31.41.827.33.515.75.140.77.939.78.932.46.987.868.290.362.8
Zero Shot
QwenVL-7B0.00.00.00.00.00.00.70.00.00.00.00.0----
GPT-4o1.30.01.00.02.00.02.10.01.10.00.00.0----
QwenVL2.5-3B16.21.423.31.410.24.738.26.424.33.815.01.160.843.570.135.0
QwenVL2.5-7B33.12.123.73.512.26.336.87.337.87.530.86.986.965.189.760.0
Reinforcement Fine-Tuning
UI-R1-3B22.74.127.33.511.26.343.411.832.211.313.14.585.273.390.259.3
GUI-R1-3B33.84.840.95.626.47.861.817.353.617.028.15.689.672.193.864.8
GUI-R1-7B49.44.838.98.423.96.355.611.858.726.442.116.991.375.791.873.6
", + "bbox": [ + 173, + 138, + 823, + 429 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/fc5cfb0efb77c23d45296aad426bb409af65cd88b79315371f87476014280469.jpg", + "table_caption": [ + "Table 2: GUI low-level task accuracy on GUI-Act-Web, OmniAct-Web, OmniAct-Desktop, and AndroidControl-Low. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + ], + "table_footnote": [], + "table_body": "
ModelsGUI-Act-WebOmniAct-WebOmniAct-DesktopAndroidControl-LowOverall
TypeGRSRTypeGRSRTypeGRSRTypeGRSR
Supervised Fine-Tuning
Os-Atlas-4B79.2258.5742.6246.7449.2422.9963.3042.5526.9464.5871.1940.6250.71
Os-Atlas-7B86.9575.6157.0285.6369.3559.1590.2462.8756.7373.0073.3750.9470.07
QwenVL2.5-3B*76.9566.3461.6966.2456.9153.0277.6262.5463.7671.0874.5358.7965.79
QwenVL2.5-7B*87.6684.7779.8981.6273.4573.3986.2380.1779.8084.0085.7464.3280.09
Zero Shot
GPT-4o77.0945.0241.8479.3342.7934.0679.9763.2550.6774.3338.6728.3954.46
QwenVL2.5-3B56.1064.2855.6150.6346.8947.0256.9547.9746.8962.0374.0759.3255.65
QwenVL2.5-7B86.5984.3978.6379.1571.3271.2184.7479.8979.6683.4487.0862.5079.05
Reinforcement Fine-Tuning
UI-R1-3B75.8979.4367.3175.4261.3561.3373.4164.1263.9879.1582.4166.4470.85
GUI-R1-3B89.8687.4276.3188.5875.1075.0891.8678.3778.3183.6881.5964.4180.88
GUI-R1-7B90.8588.0680.3191.1677.2977.3592.2083.3683.3385.1784.0266.5283.30
", + "bbox": [ + 173, + 496, + 823, + 702 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "task-specific rewards but also achieves this with fewer training examples, which provides a scalable and efficient alternative to traditional SFT methods.", + "bbox": [ + 169, + 731, + 823, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "High-level task capability. We evaluate the high-level task execution capability of GUI-R1 using AndroidControl-High [27] and GUI-Odyssey [28]. AndroidControl-High evaluates high-level task execution on mobile platforms, while GUI-Odyssey focuses on cross-app navigation scenarios, featuring high-level tasks spanning six applications and 203 apps.", + "bbox": [ + 169, + 766, + 823, + 823 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Table 3, due to our unified action space with rule-based reward modeling, GUI-R1 achieves SOTA on high-level GUI tasks. Compared to the closed-source model GPT-4o, our 3B-scale method achieves an absolute improvement of 21.06, demonstrating that RFT, in contrast to SFT, can efficiently and reliably enhance the success rate of GUI agents in real-world tasks. Furthermore, compared to UI-R1 [9], which focuses on improving low-level grounding capabilities, our model achieves an average improvement of 3.4 points at the 3B scale, with a particularly notable", + "bbox": [ + 169, + 827, + 826, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d0a5c889d078984ece46a418434aaa9819e88f9e9d29655aaa178ba48ae34a91.jpg", + "table_caption": [ + "Table 3: GUI high-level task accuracy on AndroidControl-High and GUI-Odyssey. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + ], + "table_footnote": [], + "table_body": "
ModelsAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
Supervised Fine-Tuning
OS-Atlas-4B49.0149.5122.7749.6334.6320.2537.63
OS-Atlas-7B57.4454.9029.8360.4239.7426.9644.88
QwenVL2.5-3B*52.0549.5341.2243.6932.2127.3141.00
QwenVL2.5-7B*69.1558.6948.1156.7838.6534.4450.97
Zero Shot
GPT-4o63.0630.9021.1737.5014.175.3628.69
QwenVL2.5-3B47.8146.5138.9037.4026.4926.6937.30
QwenVL2.5-7B68.6759.7147.0655.6037.7834.3750.53
Reinforcement Fine-Tuning
UI-R1-3B57.8555.7045.4452.1634.4632.4946.35
GUI-R1-3B58.0456.2446.5554.8441.5241.3349.75
GUI-R1-7B71.6365.5651.6765.4943.6438.7956.13
", + "bbox": [ + 222, + 138, + 776, + 386 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3a9ff69a46c697dceba16ce549cd2020caec5b420655f3f43a3832c8234e5942.jpg", + "image_caption": [ + "Figure 4: Ablation study of image resolution and data quality." + ], + "image_footnote": [], + "bbox": [ + 290, + 398, + 702, + 553 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$27.2\\%$ lead in the step success rate on GUI-Odyssey. This indicates that RL training focused on low-level tasks is insufficient for handling complex high-level instructions. RFT designed for high-level tasks is better suited as a direction for developing GUI agent models.", + "bbox": [ + 169, + 595, + 823, + 640 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 171, + 648, + 318, + 664 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Image resolution and data quality. To investigate the impact of image resolution and data quality on GUI RFT, we conduct corresponding ablation experiments, with the results shown in Figure 4. As observed, when using the filtered GUI-R1-3K dataset, the model requires only a few updates to achieve relatively high rewards. In contrast, training with unfiltered and low-quality data necessitates significantly more training time for the model to converge, with a noticeably lower performance ceiling. To further explore the effect of image resolution on model training, we increase the image resolution to twice its original size (from 1,048,576 pixels to 2,097,152 pixels). 
As shown in Figure 4, because of the high resolution of GUI task images and the small size of many UI elements, increasing the image resolution allows the model to perceive these elements more clearly, which accelerates the convergence speed of RFT and improves the performance ceiling.", + "bbox": [ + 169, + 674, + 823, + 813 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Coefficients in the reward function. To explore the impact of the coefficients for format rewards and accuracy rewards in the reward function on the final performance, we conduct relevant ablation experiments, as shown in Table 4. The results indicate that reducing the coefficient ratio of format rewards leads to consistent performance improvements. This is because format rewards are easier to learn during training and often converge early in the process. By amplifying the accuracy rewards, the advantages of providing correct answers are further emphasized, ultimately leading to more performance improvements.", + "bbox": [ + 169, + 819, + 823, + 917 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b3b746105ea2ffcd1fb7f8ee095e4365304aba370c8b0e79c09e244405980cdc.jpg", + "table_caption": [ + "Table 4: Ablation study of the coefficient $\\alpha$ and $\\beta$ in reward function." + ], + "table_footnote": [], + "table_body": "
αβAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
0.20.858.0456.2446.5554.8441.5241.3349.75
0.50.557.9355.9146.6252.7737.4435.6647.72
0.80.257.8555.7045.4452.1634.4632.4946.48
", + "bbox": [ + 287, + 112, + 709, + 191 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.4 Visualization", + "text_level": 1, + "bbox": [ + 171, + 217, + 305, + 231 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In Figure 5, we provide additional visualization of the training process. As shown in Figure 5a and Figure 5b, it can be observed that the format reward converges quickly in the early stages of training, while the accuracy reward becomes the main source of differentiated rewards in the later stages of training. Furthermore, as illustrated in Figure 5d, the mean response length first decreases and then gradually increases, but the \"aha moment\" does not occur. This may be due to the single-image input training method in a non-interactive environment, which prevents the model from autonomously tracing back the sequence of incorrect actions. Exploring multi-image high-level tasks in interactive environments could be a potential direction for inducing the emergence of the \"aha moment\" in future research.", + "bbox": [ + 169, + 242, + 826, + 366 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e2de6aad495dae889bb09433213fc9eb2b0d78b38f07f8d686d2e22d3d1a9525.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 378, + 493, + 501 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/17f917d81e4fa2897b0b75f5c235d52a73e95bc59698049327863f8f4c7e5fdd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 378, + 823, + 501 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/11c1746550af984bf27ac6ab2bfa8bc0572a3d1eff36d53b128703bd9ce95da9.jpg", + "image_caption": [ + "(a) Accuracy reward curve with training steps.", + "(c) PG loss curve with training steps.", + "Figure 5: Visualization of the training process of GUI-R1. 
To provide more details, we report the curves of GUI-R1's key metrics during training, including format reward, accuracy reward, mean response length, and policy gradient (PG) loss, as they vary with the training steps." + ], + "image_footnote": [], + "bbox": [ + 173, + 521, + 493, + 643 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3ccc48d75f9254f3950936c15024e9b372f78d1f4843180668d22b93c0ebadae.jpg", + "image_caption": [ + "(b) Format reward curve with training steps.", + "(d) Mean response length curve with training steps." + ], + "image_footnote": [], + "bbox": [ + 504, + 521, + 823, + 643 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 743, + 302, + 758 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This paper presents GUI-R1, which is the first GUI reinforcement learning framework grounded in unified action space rule modeling. By integrating reinforcement fine-tuning with large vision-language models, GUI-R1 enables effective contextual action prediction and verifiable reward-driven learning in GUI environments. Extensive experiments demonstrate that GUI-R1 consistently outperforms baselines on various tasks. Moving forward, we plan to extend GUI-R1 to support collaborative multi-agent interaction and robust error correction policies, enabling the system to handle complex tasks with greater scalability.", + "bbox": [ + 169, + 773, + 823, + 872 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. 
Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024.", + "[2] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025.", + "[3] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024.", + "[4] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.", + "[5] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025.", + "[6] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025.", + "[7] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02.", + "[8] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15.", + "[9] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. arXiv preprint arXiv:2503.21620, 2025.", + "[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[11] Theodore Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas Griffiths. Cognitive architectures for language agents. Transactions on Machine Learning Research, 2023.", + "[12] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6):186345, 2024.", + "[13] Qiushi Sun, Zhangyue Yin, Xiang Li, Zhiyong Wu, Xipeng Qiu, and Lingpeng Kong. Corex: Pushing the boundaries of complex reasoning through multi-model collaboration. arXiv preprint arXiv:2310.00280, 2023.", + "[14] Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024.", + "[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.", + "[16] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + ], + "bbox": [ + 171, + 113, + 825, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Jiawei Liu and Lingming Zhang. Code-r1: Reproducing r1 for code with reliable rewards. 
arXiv preprint arXiv:2503.18470, 2025.", + "[18] Zihan Wang*, Kangrui Wang*, Qineng Wang*, Pingyue Zhang*, Linjie Li*, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. Training agents by reinforcing reasoning, 2025.", + "[19] Zhenyu Pan and Han Liu. Metaspatial: Reinforcing 3d spatial reasoning in vlms for the meta-verse. arXiv preprint arXiv:2503.18470, 2025.", + "[20] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025.", + "[21] Guilherme Penedo, Hynek Kydlicek, Anton Lozhkov, Margaret Mitchell, Colin A Raffel, Leandro Von Werra, Thomas Wolf, et al. The fineweb datasets: Decanting the web for the finest text data at scale. In NeurIPS, pages 30811-30849, 2024.", + "[22] Chongyang Bai, Xiaoxue Zang, Ying Xu, Srinivas Sunkara, Abhinav Rastogi, Jindong Chen, and Blaise Aguera y Arcas. Uibert: Learning generic multimodal representations for ui understanding, 2021.", + "[23] Yuxiang Chai, Siyuan Huang, Yazhe Niu, Han Xiao, Liang Liu, Dingyu Zhang, Peng Gao, Shuai Ren, and Hongsheng Li. Amex: Android multi-annotation expo dataset for mobile gui agents. arXiv preprint arXiv:2407.17490, 2024.", + "[24] Yang Li, Jiacong He, Xin Zhou, Yuan Zhang, and Jason Baldridge. Mapping natural language instructions to mobile ui action sequences. arXiv preprint arXiv:2005.03776, 2020.", + "[25] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In ACL, 2024.", + "[26] Yaowei Zheng, Junting Lu, Shenzhi Wang, and Y Xiong. 
Easyr1: An efficient, scalable, multimodality rl training framework, 2025.", + "[27] Wei Li, William Bishop, Alice Li, Chris Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. On the effects of data scale on computer control agents. arXiv preprint arXiv:2406.03679, 2024.", + "[28] Quanfeng Lu, Wenqi Shao, Zitao Liu, Fanqing Meng, Boxuan Li, Botong Chen, Siyuan Huang, Kaipeng Zhang, Yu Qiao, and Ping Luo. Gui odyssey: A comprehensive dataset for cross-app gui navigation on mobile devices. arXiv preprint arXiv:2406.08451, 2024.", + "[29] Kaixin Li, Ziyang Meng, Hongzhan Lin, Ziyang Luo, Yuchen Tian, Jing Ma, Zhiyong Huang, and Tat-Seng Chua. Screenshot-pro: Gui grounding for professional high-resolution computer use. Workshop on Reasoning and Planning for Large Language Models, 2025.", + "[30] Wentong Chen, Junbo Cui, Jinyi Hu, Yujia Qin, Junjie Fang, Yue Zhao, Chongyi Wang, Jun Liu, Guirong Chen, Yupeng Huo, et al. Guicourse: From general vision language models to versatile gui agents. arXiv preprint arXiv:2406.11317, 2024.", + "[31] Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem Al-Shikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In ECCV, pages 161-178. Springer, 2024." 
+ ], + "bbox": [ + 173, + 90, + 825, + 825 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_model.json b/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..274a7371cb77ffd51db72fc45a1c4240884bfeda --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_model.json @@ -0,0 +1,1696 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.283, + 0.058, + 0.712 + ], + "angle": 270, + "content": "arXiv:2504.10458v4 [cs.CV] 1 Oct 2025" + }, + { + "type": "title", + "bbox": [ + 0.203, + 0.123, + 0.797, + 0.175 + ], + "angle": 0, + "content": "GUI-R1: A Generalist R1-Style Vision-Language Action Model For GUI Agents" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.224, + 0.75, + 0.239 + ], + "angle": 0, + "content": "Run Luo\\(^{1,2}\\) Lu Wang\\(^{3}\\) Wanwei He\\(^{1,2}\\) Longze Chen\\(^{1,2}\\) Jiaming Li\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.409, + 0.24, + 0.59, + 0.255 + ], + "angle": 0, + "content": "Min Yang\\(^{1,2}\\) Xiaobo Xia\\(^{3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.256, + 0.726, + 0.269 + ], + "angle": 0, + "content": "1Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.365, + 0.269, + 0.633, + 0.283 + ], + "angle": 0, + "content": "\\(^{2}\\)University of Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.398, + 0.283, + 0.602, + 0.297 + ], + "angle": 0, + "content": "\\(^{3}\\)National University of Singapore" + }, + { + "type": "text", + "bbox": [ + 0.253, + 0.297, + 0.746, + 0.311 + ], + "angle": 0, + "content": "\\(\\left\\{\\mathrm{R.LUO@SIAT.AC.CN}\\right.\\) M.YANG@SIAT.AC.CN 
XIAOBOXIA.UNI@GMAIL.COM}" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.347, + 0.538, + 0.362 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.377, + 0.767, + 0.668 + ], + "angle": 0, + "content": "Existing efforts in building graphical user interface (GUI) agents largely rely on the training paradigm of supervised fine-tuning (SFT) on large vision-language models (LVLMs). However, this approach not only demands extensive amounts of training data but also struggles to effectively understand GUI screenshots and generalize to unseen interfaces. The issue significantly limits its application in real-world scenarios, especially for high-level tasks. Inspired by reinforcement fine-tuning (RFT) in large reasoning models (e.g., DeepSeek-R1), which efficiently enhances the problem-solving capabilities of large language models in real-world settings, we propose GUI-R1, the first reinforcement learning framework designed to enhance the GUI capabilities of LVLMs in high-level real-world task scenarios, through unified action space rule modeling. By leveraging a small amount of carefully curated high-quality data across multiple platforms (including Windows, Linux, MacOS, Android, and Web) and employing policy optimization algorithms such as group relative policy optimization (GRPO) to update the model, GUI-R1 achieves superior performance using only \\(0.02\\%\\) of the data (3K vs. 13M) compared to previous state-of-the-art methods like OS-Atlas across eight benchmarks spanning three different platforms (mobile, desktop, and web). These results demonstrate the immense potential of reinforcement learning based on unified action space rule modeling in improving the execution capabilities of LVLMs for real-world GUI agent tasks. The codebase is available at https://github.com/ritzzz-ai/GUI-R1.git." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.698, + 0.314, + 0.714 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.729, + 0.825, + 0.856 + ], + "angle": 0, + "content": "Recent studies [1; 2; 3] have explored the use of large vision-language models (LVLMs) [4] to develop graphical user interface (GUI) agents capable of performing high-level complex tasks. These agents analyze the screen as a self-contained source of information for decision-making, without relying on environment-based textual descriptions such as HTML or accessibility trees. This approach offers greater flexibility in agent decision-making. However, previous works have predominantly relied on the training paradigm of supervised fine-tuning (SFT), which not only requires large amounts of high-quality training data but also struggles to effectively comprehend GUI screenshots and generalize to unseen interfaces. These limitations have significantly hindered the real-world applicability of these works, particularly for high-level GUI tasks that lack explicit step-by-step instructions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.86, + 0.825, + 0.903 + ], + "angle": 0, + "content": "Rule-based reinforcement fine-tuning has recently emerged as an efficient and scalable alternative to SFT, requiring only a small number of examples to fine-tune models effectively while demonstrating strong performance and generalization capabilities in domain-specific tasks. RFT has been increas" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.276, + 0.937 + ], + "angle": 0, + "content": "Technical report." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.386, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.203, + 0.256, + 0.356, + 0.271 + ], + "angle": 0, + "content": "(a) Grounding capability." 
+ }, + { + "type": "image", + "bbox": [ + 0.395, + 0.096, + 0.605, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.256, + 0.588, + 0.271 + ], + "angle": 0, + "content": "(b) Low-level task capability." + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.091, + 0.825, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.63, + 0.256, + 0.808, + 0.271 + ], + "angle": 0, + "content": "(c) High-level task capability." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.278, + 0.825, + 0.308 + ], + "angle": 0, + "content": "Figure 1: GUI-R1 achieves the best performance on eight evaluation datasets covering various platforms and task granularities, demonstrating the promising potential of RFT in GUI agent tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.333, + 0.827, + 0.472 + ], + "angle": 0, + "content": "ingly adopted for developing various LVLMs [5; 6; 7; 8; 9]. Inspired by these advancements, this study extends the rule-based reinforcement learning (RL) paradigm to the domain of GUI agents, which focuses on GUI action prediction tasks within a unified action space driven by high-level instructions. Specifically, LVLMs generate multiple responses (trajectories) for each input, containing both reasoning traces and final answers. These responses are evaluated using a unified action space reward function designed in this work, and the model is updated through policy optimization [10]. This iterative self-learning process enhances the model's reasoning capabilities in action prediction and its generalization to out-of-distribution (OOD) scenarios. By modeling a unified action space, we efficiently curate high-quality data spanning multiple platforms, including Windows, Linux, MacOS, Android, and Web, while avoiding action prediction conflicts across different platforms." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.478, + 0.825, + 0.548 + ], + "angle": 0, + "content": "As demonstrated in Figure 1, the proposed framework (GUI-R1) achieves superior performance using only \\(0.02\\%\\) of the data (3K vs. 13M) compared to previous state-of-the-art methods like OSAtlas [1] across eight benchmarks covering three different platforms (mobile, desktop, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks). Before delving into details, we clearly emphasize our contribution as follows." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.558, + 0.825, + 0.587 + ], + "angle": 0, + "content": "- We propose GUI-R1, the first framework that utilizes rule-based reinforcement fine-tuning to enhance the reasoning capabilities of LVLMs in high-level GUI action prediction tasks." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.591, + 0.825, + 0.633 + ], + "angle": 0, + "content": "- We design a rule-based unified action space reward function, which efficiently validates GUI task responses across different platforms and task granularities. This ensures reliable and efficient data selection and model training." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.637, + 0.825, + 0.68 + ], + "angle": 0, + "content": "- Leveraging the rule-based unified action space reward function, we construct GUI-R1-3K, which is a high-quality fine-tuning dataset with diversity and complexity. This dataset significantly improves both training efficiency and model performance." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.684, + 0.825, + 0.753 + ], + "angle": 0, + "content": "- We conduct a comprehensive evaluation of GUI agents, covering three distinct platforms (desktop, mobile, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks) across eight benchmarks. Experimental results demonstrate that our GUI-R1 is leading in multiple realistic cases. 
This creates a strong baseline of GUI agents for future research." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.558, + 0.825, + 0.753 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.772, + 0.323, + 0.789 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.803, + 0.297, + 0.818 + ], + "angle": 0, + "content": "2.1 GUI Agents" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Autonomous agents driven by large foundation models (e.g., large language models (LLMs) and large vision-language models (LVLMs)) have gained significant attention for their powerful interactive capabilities [11]. These operating systems via programs or API calls [12; 13]. However, the closed-source nature of most commercial software limits access to internal APIs or code, which promotes a shift in research toward GUI agents. Different from traditional programmatic agents, GUI agents simulate human interactions via mouse and keyboard inputs, which enable broader flexibility" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.231 + ], + "angle": 0, + "content": "in solving complex tasks. Recent works have advanced this direction. For instance, UGround [14] developed a specialized GUI grounding model for precise GUI element localization. OS-Atlas [1] introduced large action models to handle general agent tasks by interpreting human intentions and predicting actions in the form of function calls. UITars [2] proposed a more comprehensive method by combining GUI-related pretraining with task-level reasoning fine-tuning to better capture the complexity of GUI interactions. 
Nevertheless, these methods all rely on the paradigm of supervised fine-tuning (SFT), which suffers from two main limitations: (1) the training process requires vast amounts of diverse data; (2) the models exhibit limited generalization capabilities, which struggle to understand GUI screenshots and adapt to unseen interfaces. These limitations motivate the development of a more advanced learning paradigm for GUI agents beyond traditional SFT methods." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.247, + 0.406, + 0.263 + ], + "angle": 0, + "content": "2.2 Reinforcement Fine-Tuning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.453 + ], + "angle": 0, + "content": "Rule-based reinforcement fine-tuning, exemplified by OpenAI o1 [15] and DeepSeek-R1 [10], has demonstrated strong performance in mathematical reasoning [16], code generation [17], and multi-step logic tasks [18]. Subsequent studies have extended this paradigm to multimodal models by designing task-specific reward functions for vision-based tasks, such as correct class prediction in image classificati [19; 7; 20], intersection-over-union (IoU) metrics in image localization and detection [6; 5], and accurate click position prediction in low-level GUI grounding tasks [9]. These works demonstrate that verifiable reward signals, e.g., symbolic correctness or execution-based feedback, can effectively substitute traditional supervision. Despite the strong potential of RFT in various tasks, it remains underexplored in complex high-level GUI agent tasks. Compared to other domains, building intelligent agents for high-level GUI tasks is particularly challenging due to diverse UI layouts, implicit task semantics, and long-horizon action dependencies. This imposes higher demands on the model's contextual learning and understanding capabilities. To the best of our knowledge, GUI-R1 is the first RFT-based framework specifically designed for high-level GUI agents." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.473, + 0.378, + 0.489 + ], + "angle": 0, + "content": "3 GUI-R1 Framework" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.511, + 0.818, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.697, + 0.825, + 0.754 + ], + "angle": 0, + "content": "Figure 2: Overview of the GUI-R1 Framework. Given the high-level instruction, action history, and visual image inputs, the policy model generates multiple responses containing reasoning steps. Then the verifiable rewards, such as action type reward, click point reward, and input text reward, are used with the policy gradient optimization algorithm to update the policy model." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.771, + 0.825, + 0.843 + ], + "angle": 0, + "content": "GUI-R1 is based on a reinforcement learning training paradigm designed to enhance the ability of GUI agents to complete sophisticated instructional tasks. As shown in Figure 2, unlike low-level tasks, high-level GUI tasks lack explicit and fine-grained instructions, which require action predictions based on high-level task objectives and execution history. This imposes greater demands on the model's contextual learning and understanding capabilities." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.858, + 0.308, + 0.872 + ], + "angle": 0, + "content": "3.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "We define the goal of GUI agents in high-level instructional tasks as understanding and executing low-level instructions to complete the high-level task \\( Q \\), based on the current interface image \\( I \\)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "and the execution history \\( H \\). Formally, given the input \\( Q \\), \\( I \\), and \\( H \\), the model generates a set of candidate responses \\( O = \\{o_1, o_2, \\dots, o_N\\} \\), where each response contains attributes of the predicted low-level action \\( o^{\\mathrm{act}} \\), input text \\( o^{\\mathrm{text}} \\), and input point \\( o^{\\mathrm{point}} \\). Each response is evaluated using a unified action space reward function to compute its reward \\( \\{r_1, r_2, \\dots, r_N\\} \\). GRPO [10] is applied to estimate advantages and update the policy model under KL divergence constraints. The relative advantage \\( A_i \\) of the \\( i \\)-th response is calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.378, + 0.192, + 0.619, + 0.226 + ], + "angle": 0, + "content": "\\[\nA _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} (\\{r _ {1} , r _ {2} , \\ldots , r _ {N} \\})}{\\operatorname {s t d} (\\{r _ {1} , r _ {2} , \\ldots , r _ {N} \\})},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.233, + 0.761, + 0.248 + ], + "angle": 0, + "content": "where mean and std denote the mean and standard deviation of the rewards, respectively." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.262, + 0.515, + 0.278 + ], + "angle": 0, + "content": "3.2 Verifiable Rewards in Unified Action Space" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.288, + 0.825, + 0.37 + ], + "angle": 0, + "content": "We adopt a unified action space modeling strategy, which extracts action space categories across different platforms and integrates them into a unified action space. This ensures that all high-level instructions can be decomposed into a sequence of atomic actions, resolving action space conflicts in multi-platform data joint training. Based on the unified action space, we design verifiable reward functions to evaluate the accuracy of predicted actions to guide reinforcement learning. We detail these verifiable rewards below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.378, + 0.825, + 0.474 + ], + "angle": 0, + "content": "Format reward. Following previous work [20; 10; 6], we introduce format rewards during training to evaluate whether the generated output adheres to the expected structural format, including both syntactic and semantic validity. Specifically, format rewards guide the model to generate reasoning processes and final answers in a structured format, which play a critical role in self-learning and iterative improvement during reinforcement fine-tuning. The format reward templates used in training and inference are as follows, where ‘’ represents the reasoning process and ‘’ represents the final answer." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.485, + 0.611, + 0.499 + ], + "angle": 0, + "content": "Unified Action Space Prompt for Task Training and Inference" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.506, + 0.801, + 0.588 + ], + "angle": 0, + "content": "You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot \\(<\\) image \\(>\\), I want you to continue executing the command task, with the action history being history. 
Please provide the action to perform (enterprise from [complete, close/delete, press_home, click, press_back, type, select, scroll, enter]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.589, + 0.801, + 0.66 + ], + "angle": 0, + "content": "Output the thinking process in tags, and the final answer in tags as follows: ... [action]: enum[complete, close/delete, press_home, click, press_back, type, select, scroll, enter], 'point': [x, y], 'input_text': 'no input text [default]']." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.678, + 0.652, + 0.693 + ], + "angle": 0, + "content": "Unified Action Space Prompt for Grounding Training and Inference" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.699, + 0.801, + 0.768 + ], + "angle": 0, + "content": "You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot \\(<\\) image \\(>\\), I want you to continue executing the command task, with the action history being history. Please provide the action to perform (Enumerate from [click]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.769, + 0.801, + 0.826 + ], + "angle": 0, + "content": "Output the thinking process in tags, and the final answer in tags as follows: ... [‘action’: enum(click], ‘point’: [x, y], ‘input_text’: ‘no input text [default]’." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.842, + 0.825, + 0.911 + ], + "angle": 0, + "content": "Accuracy rewards. 
For the model's predicted output \\( o = \\{o^{\\mathrm{act}}, o^{\\mathrm{text}}, o^{\\mathrm{point}}\\} \\), which consists of three components: \\( o^{\\mathrm{act}} \\) (action type, e.g., click, scroll), \\( o^{\\mathrm{point}} \\) (click point position), and \\( o^{\\mathrm{text}} \\) (input text), we define the accuracy reward \\( R_{\\mathrm{acc}} \\) as a combination of action type reward \\( R_{\\mathrm{act}} \\), click point reward \\( R_{\\mathrm{point}} \\), and input text reward \\( R_{\\mathrm{text}} \\), i.e., \\( R_{\\mathrm{acc}} = R_{\\mathrm{act}} + R_{\\mathrm{point}} + R_{\\mathrm{text}} \\). This design provides reliable correctness rewards for all actions." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "Action type reward. The action type reward \\( R_{\\mathrm{act}} \\) is calculated by comparing the predicted action type \\( o^{\\mathrm{act}} \\) with the ground truth action type \\( gt^{\\mathrm{act}} \\). If \\( o^{\\mathrm{act}} == gt^{\\mathrm{act}} \\), the reward is 1; otherwise, it is 0. This simple yet effective evaluation mechanism guides action type prediction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.826, + 0.184 + ], + "angle": 0, + "content": "Click point reward. The click point reward \\( R_{\\mathrm{point}} \\) is calculated by comparing the predicted click point \\( o^{\\mathrm{point}} = [x, y] \\) with the ground truth bounding box \\( gt^{\\mathrm{bbox}} = [x_1, y_1, x_2, y_2] \\). The calculation formula is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.206, + 0.603, + 0.24 + ], + "angle": 0, + "content": "\\[\nR _ {\\text {p o i n t}} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} o ^ {\\text {p o i n t}} \\in g t ^ {\\text {b b o x}}, \\\\ 0 & \\text {o t h e r w i s e}. 
\\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.256, + 0.825, + 0.298 + ], + "angle": 0, + "content": "Input text reward The input text reward \\( R_{\\mathrm{text}} \\) is calculated by comparing the predicted input text \\( o^{\\mathrm{text}} \\) with the ground truth text parameter \\( gt^{\\mathrm{text}} \\) using the semantic \\( F_1 \\) score. The calculation formula is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.321, + 0.624, + 0.356 + ], + "angle": 0, + "content": "\\[\nR _ {\\text {t e x t}} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} F _ {1} (o ^ {\\text {t e x t}}, g t ^ {\\text {t e x t}}) > 0. 5, \\\\ 0 & \\text {o t h e r w i s e .} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.371, + 0.825, + 0.414 + ], + "angle": 0, + "content": "Response reward. The final response reward is composed of format rewards and accuracy rewards, defined as: \\( R_{o} = \\alpha R_{\\mathrm{f}} + \\beta R_{\\mathrm{acc}} \\), where \\( R_{\\mathrm{f}} \\) represents the format reward, \\( R_{\\mathrm{acc}} \\) represents the accuracy reward, and \\( \\alpha \\) and \\( \\beta \\) are weighting parameters respectively." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.433, + 0.379, + 0.449 + ], + "angle": 0, + "content": "3.3 Training Data Curation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Data collection. We collect data related to GUI tasks from multiple open-source datasets, including FineWeb [21], UIBert [22], AMEX [23], RICOSCA [24], as well as portions of pretraining data from Seeclick [3] and OS-Otlas [1]. This leads to \\(\\sim 14\\mathrm{M}\\) examples of grounding and low-level task data. Additionally, we collect \\(\\sim 30\\mathrm{K}\\) high-level GUI data points from OS-Otlas instruction datasets. 
In total, we gather \\(\\sim 14\\mathrm{M}\\) examples spanning multiple platforms (including Windows, Linux, MacOS, Android, and Web) and various task granularities (grounding, low-level, and high-level)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.549, + 0.826, + 0.661 + ], + "angle": 0, + "content": "Data filtering. To filter out low-quality data for efficient RFT, we use the Qwen2.5VL-7B [4] model to generate 10 responses for each example and evaluate them using a rule-based reward function designed for unified action space modeling. We remove the problems with an estimated accuracy of 0 or 1 to ensure a stable training process, resulting in 140K low-level data and 1.5K high-level data. Since the quantity of low-level data far exceeds that of high-level data, we randomly sample 1.5K low-level data and combine it with all high-level data to create a balanced dataset of 3K high-quality training examples, named GUI-R1-3K. The distribution of image categories, action types, and corresponding difficulty levels is demonstrated in Figure 3." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.678, + 0.49, + 0.839 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.206, + 0.849, + 0.462, + 0.861 + ], + "angle": 0, + "content": "(a) Image category quantity and difficulty distribution." + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.678, + 0.822, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.849, + 0.794, + 0.861 + ], + "angle": 0, + "content": "(b) Action category quantity and difficulty distribution." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Figure 3: Illustrations of image and action category quantity and difficulty distributions in the dataset GUI-R1-3K." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.314, + 0.108 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.126, + 0.378, + 0.141 + ], + "angle": 0, + "content": "4.1 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.155, + 0.825, + 0.239 + ], + "angle": 0, + "content": "Training and inference details. For supervised fine-tuning (SFT), we use the QwenVL2.5-3B/7B [4] model as the base model for experiments and employ the LLaMA Factory [25] framework for one epoch of training to avoid overfitting. For RFT, we use the EasyR1 [26] framework for training over nine epochs. During inference, to ensure fairness, we apply a unified and simple prompt across all comparison methods, and conduct experiments under zero-shot prompt configurations. All experiments are conducted using \\(8 \\times\\) NVIDIA A100-80G GPUs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.825, + 0.301 + ], + "angle": 0, + "content": "Evaluation benchmarks. We evaluate our model on eight agent benchmarks on three different platforms, including AndroidControl-Low [27], AndroidControl-High [27], GUI-Odyssey [28], ScreenSpot [3], ScreenSpot-Pro [29], GUI-Act-Web [30], OmniAct-Web [31], and OmniAct-Desktop [31]. We only use the test splits of these benchmarks for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.306, + 0.826, + 0.405 + ], + "angle": 0, + "content": "Evaluation metrics. Following Os-Atlas [1], we use three commonly adopted metrics for GUI agents in evaluation: action type prediction accuracy, click point prediction accuracy, and step success rate, denoted as Type, Grounding, and SR, respectively. In more detail, Type measures the exact match score between the predicted action types (e.g., 'click' and 'scroll') and the ground truth. 
Grounding evaluates the performance of GUI grounding in downstream tasks. Besides, SR represents the step-wise success rate, where a step is deemed successful only if both the predicted action and its associated arguments (e.g., point for click actions and input text for scroll actions) are correct." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.428, + 0.364, + 0.443 + ], + "angle": 0, + "content": "4.2 Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.457, + 0.825, + 0.486 + ], + "angle": 0, + "content": "We here evaluate our GUI-R1 model by comparing it with current state-of-the-art (SOTA) models on various tasks including GUI grounding tasks, GUI low-level tasks, and GUI high-level tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.548 + ], + "angle": 0, + "content": "Grounding capability. We evaluate the grounding capability of GUI-R1 using ScreenSpot [3] and ScreenSpot-Pro [29]. ScreenSpot assesses GUI grounding performance across mobile, desktop, and web platforms, while ScreenSpot-Pro focuses on high-resolution professional environments, featuring expert-annotated tasks spanning 23 applications, five industries, and three operating systems." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.553, + 0.826, + 0.706 + ], + "angle": 0, + "content": "As shown in Table 1, compared to the previous SOTA model Os-Atlas-7B, which was trained with large-scale data using supervised fine-tuning (SFT), the RFT approach achieves superior performance on the 3B-sized Qwen2.5-VL model using only \\(0.2\\%\\) of the data (3K vs. 14M). Furthermore, compared to the base models QwenVL2.5-3B/7B and the SFT-trained QwenVL2.5* 3B/7B models using the same dataset, the RFT-based GUI-R1 demonstrates significantly better performance in GUI grounding tasks. Moreover, at the 3B scale, GUI-R1 achieves substantial gains over SFT models on ScreenSpot (80.08 vs. 63.55) and ScreenSpot-Pro (25.23 vs. 
13.80), representing improvements of \\(26.3\\%\\) and \\(82.8\\%\\), respectively. This highlights the effectiveness of the RL training framework in leveraging small-scale datasets to achieve significant performance improvements, which demonstrates its potential as a data-efficient and scalable approach for model training in resource-constrained environments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.712, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Low-level task capability. We evaluate the low-level task execution capability of GUI-R1 using four benchmark datasets: AndroidControl-Low [27], GUI-Act-Web [29], OmniAct-Web, and OmniAct-Desktop [31]. AndroidControl-Low evaluates low-level task execution on mobile platforms, while GUI-Act-Web and OmniAct-Web focus on low-level task execution on web platforms. OmniAct-Desktop is used to test low-level task execution on desktop platforms." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.787, + 0.826, + 0.913 + ], + "angle": 0, + "content": "As demonstrated in Table 2, our method impressively improves the success rate of GUI low-level tasks for 3B and 7B models, with the average success rate increasing from 55.65 to 80.88 at the 3B scale. Compared to UI-R1 [9], which is concurrent work also trained using RFT, our model achieves a 10-point improvement at the 3B scale, validating that RL training focused on high-level tasks can further enhance the model's understanding of low-level instructions. Note that an interesting observation is that the use of small-scale SFT data even leads to performance degradation on some metrics such as GR on AndroidControl-Low. This limitation stems from SFT's reliance on task-specific labeled data, which constrains the model's ability to adapt to unseen environments. 
In contrast, our RFT method not only enhances out-of-distribution (OOD) generalization by optimizing" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.098, + 0.825, + 0.138 + ], + "angle": 0, + "content": "Table 1: GUI grounding accuracy on ScreenSpot and ScreenSpot-Pro. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.139, + 0.825, + 0.43 + ], + "angle": 0, + "content": "
ModelsScreenSpot-ProScreenSpot
DevCreativeCADScientificOfficeOSWebDesktop
TextIconTextIconTextIconTextIconTextIconTextIconTextIconText
Supervised Fine-Tuning
SeeClick0.60.01.00.02.50.03.50.01.10.02.80.055.732.572.230.0
Os-Atlas-4B7.10.03.01.42.00.09.05.55.13.85.60.082.663.172.145.7
ShowUI-2B16.91.49.10.02.50.013.27.315.37.510.32.2----
CogAgent-18B14.90.79.60.07.13.122.21.813.00.05.60.070.428.674.220.0
Aria-GUI16.20.023.72.17.61.627.16.420.31.94.70.0----
UGround-7B26.62.127.32.814.21.631.92.731.611.317.80.080.470.482.563.6
Claude**22.03.925.93.414.53.733.915.830.116.311.04.5----
Os-Atlas-7B33.11.428.82.812.24.737.57.333.95.727.14.590.874.291.762.8
QwenVL2.5-3B*20.31.824.62.811.24.739.56.428.65.717.82.273.048.585.746.2
QwenVL2.5-7B*31.41.827.33.515.75.140.77.939.78.932.46.987.868.290.362.8
Zero Shot
QwenVL-7B0.00.00.00.00.00.00.70.00.00.00.00.0----
GPT-4o1.30.01.00.02.00.02.10.01.10.00.00.0----
QwenVL2.5-3B16.21.423.31.410.24.738.26.424.33.815.01.160.843.570.135.0
QwenVL2.5-7B33.12.123.73.512.26.336.87.337.87.530.86.986.965.189.760.0
Reinforcement Fine-Tuning
UI-R1-3B22.74.127.33.511.26.343.411.832.211.313.14.585.273.390.259.3
GUI-R1-3B33.84.840.95.626.47.861.817.353.617.028.15.689.672.193.864.8
GUI-R1-7B49.44.838.98.423.96.355.611.858.726.442.116.991.375.791.873.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.455, + 0.825, + 0.496 + ], + "angle": 0, + "content": "Table 2: GUI low-level task accuracy on GUI-Act-Web, OmniAct-Web, OmniAct-Desktop, and AndroidControl-Low. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.497, + 0.825, + 0.703 + ], + "angle": 0, + "content": "
ModelsGUI-Act-WebOmniAct-WebOmniAct-DesktopAndroidControl-LowOverall
TypeGRSRTypeGRSRTypeGRSRTypeGRSR
Supervised Fine-Tuning
Os-Atlas-4B79.2258.5742.6246.7449.2422.9963.3042.5526.9464.5871.1940.6250.71
Os-Atlas-7B86.9575.6157.0285.6369.3559.1590.2462.8756.7373.0073.3750.9470.07
QwenVL2.5-3B*76.9566.3461.6966.2456.9153.0277.6262.5463.7671.0874.5358.7965.79
QwenVL2.5-7B*87.6684.7779.8981.6273.4573.3986.2380.1779.8084.0085.7464.3280.09
Zero Shot
GPT-4o77.0945.0241.8479.3342.7934.0679.9763.2550.6774.3338.6728.3954.46
QwenVL2.5-3B56.1064.2855.6150.6346.8947.0256.9547.9746.8962.0374.0759.3255.65
QwenVL2.5-7B86.5984.3978.6379.1571.3271.2184.7479.8979.6683.4487.0862.5079.05
Reinforcement Fine-Tuning
UI-R1-3B75.8979.4367.3175.4261.3561.3373.4164.1263.9879.1582.4166.4470.85
GUI-R1-3B89.8687.4276.3188.5875.1075.0891.8678.3778.3183.6881.5964.4180.88
GUI-R1-7B90.8588.0680.3191.1677.2977.3592.2083.3683.3385.1784.0266.5283.30
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.825, + 0.76 + ], + "angle": 0, + "content": "task-specific rewards but also achieves this with fewer training examples, which provides a scalable and efficient alternative to traditional SFT methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.825, + 0.824 + ], + "angle": 0, + "content": "High-level task capability. We evaluate the high-level task execution capability of GUI-R1 using AndroidControl-High [27] and GUI-Odyssey [28]. AndroidControl-High evaluates high-level task execution on mobile platforms, while GUI-Odyssey focuses on cross-app navigation scenarios, featuring high-level tasks spanning six applications and 203 apps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.827, + 0.913 + ], + "angle": 0, + "content": "As shown in Table 3, due to our unified action space with rule-based reward modeling, GUI-R1 achieves SOTA on high-level GUI tasks. Compared to the closed-source model GPT-4o, our 3B-scale method achieves an absolute improvement of 21.06, demonstrating that RFT, in contrast to SFT, can efficiently and reliably enhance the success rate of GUI agents in real-world tasks. Furthermore, compared to UI-R1 [9], which focuses on improving low-level grounding capabilities, our model achieves an average improvement of 3.4 points at the 3B scale, with a particularly notable" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.098, + 0.825, + 0.138 + ], + "angle": 0, + "content": "Table 3: GUI high-level task accuracy on AndroidControl-High and GUI-Odyssey. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + }, + { + "type": "table", + "bbox": [ + 0.223, + 0.139, + 0.777, + 0.387 + ], + "angle": 0, + "content": "
ModelsAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
Supervised Fine-Tuning
OS-Atlas-4B49.0149.5122.7749.6334.6320.2537.63
OS-Atlas-7B57.4454.9029.8360.4239.7426.9644.88
QwenVL2.5-3B*52.0549.5341.2243.6932.2127.3141.00
QwenVL2.5-7B*69.1558.6948.1156.7838.6534.4450.97
Zero Shot
GPT-4o63.0630.9021.1737.5014.175.3628.69
QwenVL2.5-3B47.8146.5138.9037.4026.4926.6937.30
QwenVL2.5-7B68.6759.7147.0655.6037.7834.3750.53
Reinforcement Fine-Tuning
UI-R1-3B57.8555.7045.4452.1634.4632.4946.35
GUI-R1-3B58.0456.2446.5554.8441.5241.3349.75
GUI-R1-7B71.6365.5651.6765.4943.6438.7956.13
" + }, + { + "type": "image", + "bbox": [ + 0.292, + 0.4, + 0.703, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.295, + 0.555, + 0.702, + 0.57 + ], + "angle": 0, + "content": "Figure 4: Ablation study of image resolution and data quality." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.597, + 0.825, + 0.641 + ], + "angle": 0, + "content": "\\(27.2\\%\\) lead in the step success rate on GUI-Odyssey. This indicates that RL training focused on low-level tasks is insufficient for handling complex high-level instructions. RFT designed for high-level tasks is better suited as a direction for developing GUI agent models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.649, + 0.32, + 0.665 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.675, + 0.825, + 0.814 + ], + "angle": 0, + "content": "Image resolution and data quality. To investigate the impact of image resolution and data quality on GUI RFT, we conduct corresponding ablation experiments, with the results shown in Figure 4. As observed, when using the filtered GUI-R1-3K dataset, the model requires only a few updates to achieve relatively high rewards. In contrast, training with unfiltered and low-quality data necessitates significantly more training time for the model to converge, with a noticeably lower performance ceiling. To further explore the effect of image resolution on model training, we increase the image resolution to twice its original size (from 1,048,576 pixels to 2,097,152 pixels). As shown in Figure 4, because of the high resolution of GUI task images and the small size of many UI elements, increasing the image resolution allows the model to perceive these elements more clearly, which accelerates the convergence speed of RFT and improves the performance ceiling." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.82, + 0.825, + 0.918 + ], + "angle": 0, + "content": "Coefficients in the reward function. To explore the impact of the coefficients for format rewards and accuracy rewards in the reward function on the final performance, we conduct relevant ablation experiments, as shown in Table 4. The results indicate that reducing the coefficient ratio of format rewards leads to consistent performance improvements. This is because format rewards are easier to learn during training and often converge early in the process. By amplifying the accuracy rewards, the advantages of providing correct answers are further emphasized, ultimately leading to more performance improvements." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.277, + 0.098, + 0.722, + 0.112 + ], + "angle": 0, + "content": "Table 4: Ablation study of the coefficient \\( \\alpha \\) and \\( \\beta \\) in reward function." + }, + { + "type": "table", + "bbox": [ + 0.288, + 0.113, + 0.71, + 0.193 + ], + "angle": 0, + "content": "
αβAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
0.20.858.0456.2446.5554.8441.5241.3349.75
0.50.557.9355.9146.6252.7737.4435.6647.72
0.80.257.8555.7045.4452.1634.4632.4946.48
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.218, + 0.306, + 0.232 + ], + "angle": 0, + "content": "4.4 Visualization" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.828, + 0.367 + ], + "angle": 0, + "content": "In Figure 5, we provide additional visualization of the training process. As shown in Figure 5a and Figure 5b, it can be observed that the format reward converges quickly in the early stages of training, while the accuracy reward becomes the main source of differentiated rewards in the later stages of training. Furthermore, as illustrated in Figure 5d, the mean response length first decreases and then gradually increases, but the \"aha moment\" does not occur. This may be due to the single-image input training method in a non-interactive environment, which prevents the model from autonomously tracing back the sequence of incorrect actions. Exploring multi-image high-level tasks in interactive environments could be a potential direction for inducing the emergence of the \"aha moment\" in future research." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.379, + 0.495, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.509, + 0.443, + 0.521 + ], + "angle": 0, + "content": "(a) Accuracy reward curve with training steps." + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.379, + 0.825, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.561, + 0.509, + 0.767, + 0.521 + ], + "angle": 0, + "content": "(b) Format reward curve with training steps." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.522, + 0.495, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.246, + 0.651, + 0.422, + 0.664 + ], + "angle": 0, + "content": "(c) PG loss curve with training steps." 
+ }, + { + "type": "image", + "bbox": [ + 0.506, + 0.522, + 0.825, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.651, + 0.785, + 0.664 + ], + "angle": 0, + "content": "(d) Mean response length curve with training steps." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.67, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Figure 5: Visualization of the training process of GUI-R1. To provide more details, we report the curves of GUI-R1's key metrics during training, including format reward, accuracy reward, mean response length, and policy gradient (PG) loss, as they vary with the training steps." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.744, + 0.303, + 0.759 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.775, + 0.825, + 0.873 + ], + "angle": 0, + "content": "This paper presents GUI-R1, which is the first GUI reinforcement learning framework grounded in unified action space rule modeling. By integrating reinforcement fine-tuning with large vision-language models, GUI-R1 enables effective contextual action prediction and verifiable reward-driven learning in GUI environments. Extensive experiments demonstrate that GUI-R1 consistently outperforms baselines on various tasks. Moving forward, we plan to extend GUI-R1 to support collaborative multi-agent interaction and robust error correction policies, enabling the system to handle complex tasks with greater scalability." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.114, + 0.826, + 0.157 + ], + "angle": 0, + "content": "[1] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.165, + 0.826, + 0.209 + ], + "angle": 0, + "content": "[2] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.216, + 0.826, + 0.259 + ], + "angle": 0, + "content": "[3] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.268, + 0.826, + 0.31 + ], + "angle": 0, + "content": "[4] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.319, + 0.826, + 0.361 + ], + "angle": 0, + "content": "[5] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.37, + 0.826, + 0.413 + ], + "angle": 0, + "content": "[6] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.422, + 0.826, + 0.464 + ], + "angle": 0, + "content": "[7] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.473, + 0.826, + 0.515 + ], + "angle": 0, + "content": "[8] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.524, + 0.826, + 0.567 + ], + "angle": 0, + "content": "[9] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. arXiv preprint arXiv:2503.21620, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.576, + 0.826, + 0.618 + ], + "angle": 0, + "content": "[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.627, + 0.826, + 0.657 + ], + "angle": 0, + "content": "[11] Theodore Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas Griffiths. Cognitive architectures for language agents. Transactions on Machine Learning Research, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.665, + 0.826, + 0.708 + ], + "angle": 0, + "content": "[12] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6):186345, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.716, + 0.826, + 0.758 + ], + "angle": 0, + "content": "[13] Qiushi Sun, Zhangyue Yin, Xiang Li, Zhiyong Wu, Xipeng Qiu, and Lingpeng Kong. Corex: Pushing the boundaries of complex reasoning through multi-model collaboration. arXiv preprint arXiv:2310.00280, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.767, + 0.826, + 0.81 + ], + "angle": 0, + "content": "[14] Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.818, + 0.826, + 0.861 + ], + "angle": 0, + "content": "[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.87, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[16] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.114, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[17] Jiawei Liu and Lingming Zhang. Code-r1: Reproducing r1 for code with reliable rewards. arXiv preprint arXiv:2503.18470, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.128, + 0.826, + 0.185 + ], + "angle": 0, + "content": "[18] Zihan Wang*, Kangrui Wang*, Qineng Wang*, Pingyue Zhang*, Linjie Li*, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. Training agents by reinforcing reasoning, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.194, + 0.825, + 0.224 + ], + "angle": 0, + "content": "[19] Zhenyu Pan and Han Liu. Metaspatial: Reinforcing 3d spatial reasoning in vlms for the meta-verse. arXiv preprint arXiv:2503.18470, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.231, + 0.826, + 0.288 + ], + "angle": 0, + "content": "[20] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.297, + 0.826, + 0.34 + ], + "angle": 0, + "content": "[21] Guilherme Penedo, Hynek Kydlicek, Anton Lozhkov, Margaret Mitchell, Colin A Raffel, Leandro Von Werra, Thomas Wolf, et al. The fineweb datasets: Decanting the web for the finest text data at scale. In NeurIPS, pages 30811-30849, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.347, + 0.826, + 0.392 + ], + "angle": 0, + "content": "[22] Chongyang Bai, Xiaoxue Zang, Ying Xu, Srinivas Sunkara, Abhinav Rastogi, Jindong Chen, and Blaise Aguera y Arcas. Uibert: Learning generic multimodal representations for ui understanding, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.4, + 0.826, + 0.443 + ], + "angle": 0, + "content": "[23] Yuxiang Chai, Siyuan Huang, Yazhe Niu, Han Xiao, Liang Liu, Dingyu Zhang, Peng Gao, Shuai Ren, and Hongsheng Li. Amex: Android multi-annotation expo dataset for mobile gui agents. arXiv preprint arXiv:2407.17490, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.451, + 0.826, + 0.481 + ], + "angle": 0, + "content": "[24] Yang Li, Jiacong He, Xin Zhou, Yuan Zhang, and Jason Baldridge. Mapping natural language instructions to mobile ui action sequences. arXiv preprint arXiv:2005.03776, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.488, + 0.826, + 0.531 + ], + "angle": 0, + "content": "[25] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of \\(100+\\) language models. In ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.54, + 0.826, + 0.57 + ], + "angle": 0, + "content": "[26] Yaowei Zheng, Junting Lu, Shenzhi Wang, and Y Xiong. Easyr1: An efficient, scalable, multimodality rl training framework, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.578, + 0.826, + 0.621 + ], + "angle": 0, + "content": "[27] Wei Li, William Bishop, Alice Li, Chris Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. On the effects of data scale on computer control agents. arXiv preprint arXiv:2406.03679, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.629, + 0.826, + 0.673 + ], + "angle": 0, + "content": "[28] Quanfeng Lu, Wenqi Shao, Zitao Liu, Fanqing Meng, Boxuan Li, Botong Chen, Siyuan Huang, Kaipeng Zhang, Yu Qiao, and Ping Luo. Gui odyssey: A comprehensive dataset for cross-app gui navigation on mobile devices. arXiv preprint arXiv:2406.08451, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.681, + 0.826, + 0.724 + ], + "angle": 0, + "content": "[29] Kaixin Li, Ziyang Meng, Hongzhan Lin, Ziyang Luo, Yuchen Tian, Jing Ma, Zhiyong Huang, and Tat-Seng Chua. Screenspot-pro: Gui grounding for professional high-resolution computer use. Workshop on Reasoning and Planning for Large Language Models, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.732, + 0.826, + 0.775 + ], + "angle": 0, + "content": "[30] Wentong Chen, Junbo Cui, Jinyi Hu, Yujia Qin, Junjie Fang, Yue Zhao, Chongyi Wang, Jun Liu, Guirong Chen, Yupeng Huo, et al. Guicourse: From general vision language models to versatile gui agents. arXiv preprint arXiv:2406.11317, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.784, + 0.826, + 0.827 + ], + "angle": 0, + "content": "[31] Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem Al-Shikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In ECCV, pages 161-178. Springer, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_origin.pdf b/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..916b63099f000d003e4102bc14d7896c8aceeae1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0917eee85a0147653d16222f3fd04f77ed74f7641c2e81e0922249baa1a13a3 +size 703521 diff --git a/data/2025/2504_10xxx/2504.10458/full.md b/data/2025/2504_10xxx/2504.10458/full.md new file mode 100644 index 0000000000000000000000000000000000000000..69557c297874595c0835ef91d9f169c6937ecd05 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/full.md @@ -0,0 +1,233 @@ +# GUI-R1: A Generalist R1-Style Vision-Language Action Model For GUI Agents + +Run Luo $^{1,2}$ Lu Wang $^{3}$ Wanwei He $^{1,2}$ Longze Chen $^{1,2}$ Jiaming Li $^{1,2}$ + +Min Yang $^{1,2}$ Xiaobo Xia $^{3}$ + +1Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences + +$^{2}$ University of Chinese Academy of Sciences + +$^{3}$ National University of Singapore + +$\left\{\mathrm{R.LUO@SIAT.AC.CN}\right.$ M.YANG@SIAT.AC.CN XIAOBOXIA.UNI@GMAIL.COM} + +# Abstract + +Existing efforts in building graphical user interface (GUI) agents largely rely on the training paradigm of supervised fine-tuning (SFT) on large vision-language models (LVLMs). However, this approach not only demands extensive amounts of training data but also struggles to effectively understand GUI screenshots and generalize to unseen interfaces. 
The issue significantly limits its application in real-world scenarios, especially for high-level tasks. Inspired by reinforcement fine-tuning (RFT) in large reasoning models (e.g., DeepSeek-R1), which efficiently enhances the problem-solving capabilities of large language models in real-world settings, we propose GUI-R1, the first reinforcement learning framework designed to enhance the GUI capabilities of LVLMs in high-level real-world task scenarios, through unified action space rule modeling. By leveraging a small amount of carefully curated high-quality data across multiple platforms (including Windows, Linux, MacOS, Android, and Web) and employing policy optimization algorithms such as group relative policy optimization (GRPO) to update the model, GUI-R1 achieves superior performance using only $0.02\%$ of the data (3K vs. 13M) compared to previous state-of-the-art methods like OS-Atlas across eight benchmarks spanning three different platforms (mobile, desktop, and web). These results demonstrate the immense potential of reinforcement learning based on unified action space rule modeling in improving the execution capabilities of LVLMs for real-world GUI agent tasks. The codebase is available at https://github.com/ritzzz-ai/GUI-R1.git. + +# 1 Introduction + +Recent studies [1; 2; 3] have explored the use of large vision-language models (LVLMs) [4] to develop graphical user interface (GUI) agents capable of performing high-level complex tasks. These agents analyze the screen as a self-contained source of information for decision-making, without relying on environment-based textual descriptions such as HTML or accessibility trees. This approach offers greater flexibility in agent decision-making. 
However, previous works have predominantly relied on the training paradigm of supervised fine-tuning (SFT), which not only requires large amounts of high-quality training data but also struggles to effectively comprehend GUI screenshots and generalize to unseen interfaces. These limitations have significantly hindered the real-world applicability of these works, particularly for high-level GUI tasks that lack explicit step-by-step instructions. + +Rule-based reinforcement fine-tuning has recently emerged as an efficient and scalable alternative to SFT, requiring only a small number of examples to fine-tune models effectively while demonstrating strong performance and generalization capabilities in domain-specific tasks. RFT has been increas + +![](images/a343d8658955f5376bd79dff57d34f2952e7c91622500bc8d7130f3f5cedf694.jpg) +(a) Grounding capability. + +![](images/512270404c63b21f462da324a0f27a925ac93742bbd95f28afe9e2cfeb780bbe.jpg) +(b) Low-level task capability. + +![](images/c6b9af241d50e031e4855976a092383154a95e81b4c574bc8196fcbcc277f33b.jpg) +(c) High-level task capability. +Figure 1: GUI-R1 achieves the best performance on eight evaluation datasets covering various platforms and task granularities, demonstrating the promising potential of RFT in GUI agent tasks. + +ingly adopted for developing various LVLMs [5; 6; 7; 8; 9]. Inspired by these advancements, this study extends the rule-based reinforcement learning (RL) paradigm to the domain of GUI agents, which focuses on GUI action prediction tasks within a unified action space driven by high-level instructions. Specifically, LVLMs generate multiple responses (trajectories) for each input, containing both reasoning traces and final answers. These responses are evaluated using a unified action space reward function designed in this work, and the model is updated through policy optimization [10]. 
This iterative self-learning process enhances the model's reasoning capabilities in action prediction and its generalization to out-of-distribution (OOD) scenarios. By modeling a unified action space, we efficiently curate high-quality data spanning multiple platforms, including Windows, Linux, MacOS, Android, and Web, while avoiding action prediction conflicts across different platforms. + +As demonstrated in Figure 1, the proposed framework (GUI-R1) achieves superior performance using only $0.02\%$ of the data (3K vs. 13M) compared to previous state-of-the-art methods like OSAtlas [1] across eight benchmarks covering three different platforms (mobile, desktop, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks). Before delving into details, we clearly emphasize our contribution as follows. + +- We propose GUI-R1, the first framework that utilizes rule-based reinforcement fine-tuning to enhance the reasoning capabilities of LVLMs in high-level GUI action prediction tasks. +- We design a rule-based unified action space reward function, which efficiently validates GUI task responses across different platforms and task granularities. This ensures reliable and efficient data selection and model training. +- Leveraging the rule-based unified action space reward function, we construct GUI-R1-3K, which is a high-quality fine-tuning dataset with diversity and complexity. This dataset significantly improves both training efficiency and model performance. +- We conduct a comprehensive evaluation of GUI agents, covering three distinct platforms (desktop, mobile, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks) across eight benchmarks. Experimental results demonstrate that our GUI-R1 is leading in multiple realistic cases. This creates a strong baseline of GUI agents for future research. 
+ +# 2 Related Work + +# 2.1 GUI Agents + +Autonomous agents driven by large foundation models (e.g., large language models (LLMs) and large vision-language models (LVLMs)) have gained significant attention for their powerful interactive capabilities [11]. These operating systems via programs or API calls [12; 13]. However, the closed-source nature of most commercial software limits access to internal APIs or code, which promotes a shift in research toward GUI agents. Different from traditional programmatic agents, GUI agents simulate human interactions via mouse and keyboard inputs, which enable broader flexibility + +in solving complex tasks. Recent works have advanced this direction. For instance, UGround [14] developed a specialized GUI grounding model for precise GUI element localization. OS-Atlas [1] introduced large action models to handle general agent tasks by interpreting human intentions and predicting actions in the form of function calls. UITars [2] proposed a more comprehensive method by combining GUI-related pretraining with task-level reasoning fine-tuning to better capture the complexity of GUI interactions. Nevertheless, these methods all rely on the paradigm of supervised fine-tuning (SFT), which suffers from two main limitations: (1) the training process requires vast amounts of diverse data; (2) the models exhibit limited generalization capabilities, which struggle to understand GUI screenshots and adapt to unseen interfaces. These limitations motivate the development of a more advanced learning paradigm for GUI agents beyond traditional SFT methods. + +# 2.2 Reinforcement Fine-Tuning + +Rule-based reinforcement fine-tuning, exemplified by OpenAI o1 [15] and DeepSeek-R1 [10], has demonstrated strong performance in mathematical reasoning [16], code generation [17], and multi-step logic tasks [18]. 
Subsequent studies have extended this paradigm to multimodal models by designing task-specific reward functions for vision-based tasks, such as correct class prediction in image classification [19; 7; 20], intersection-over-union (IoU) metrics in image localization and detection [6; 5], and accurate click position prediction in low-level GUI grounding tasks [9].
This imposes greater demands on the model's contextual learning and understanding capabilities. + +# 3.1 Preliminaries + +We define the goal of GUI agents in high-level instructional tasks as understanding and executing low-level instructions to complete the high-level task $Q$ , based on the current interface image $I$ + +and the execution history $H$ . Formally, given the input $Q$ , $I$ , and $H$ , the model generates a set of candidate responses $O = \{o_1, o_2, \dots, o_N\}$ , where each response contains attributes of the predicted low-level action $o^{\mathrm{act}}$ , input text $o^{\mathrm{text}}$ , and input point $o^{\mathrm{point}}$ . Each response is evaluated using a unified action space reward function to compute its reward $\{r_1, r_2, \dots, r_N\}$ . GRPO [10] is applied to estimate advantages and update the policy model under KL divergence constraints. The relative advantage $A_i$ of the $i$ -th response is calculated as follows: + +$$ +A _ {i} = \frac {r _ {i} - \operatorname {m e a n} (\{r _ {1} , r _ {2} , \ldots , r _ {N} \})}{\operatorname {s t d} (\{r _ {1} , r _ {2} , \ldots , r _ {N} \})}, +$$ + +where mean and std denote the mean and standard deviation of the rewards, respectively. + +# 3.2 Verifiable Rewards in Unified Action Space + +We adopt a unified action space modeling strategy, which extracts action space categories across different platforms and integrates them into a unified action space. This ensures that all high-level instructions can be decomposed into a sequence of atomic actions, resolving action space conflicts in multi-platform data joint training. Based on the unified action space, we design verifiable reward functions to evaluate the accuracy of predicted actions to guide reinforcement learning. We detail these verifiable rewards below. + +Format reward. 
Following previous work [20; 10; 6], we introduce format rewards during training to evaluate whether the generated output adheres to the expected structural format, including both syntactic and semantic validity. Specifically, format rewards guide the model to generate reasoning processes and final answers in a structured format, which play a critical role in self-learning and iterative improvement during reinforcement fine-tuning. The format reward templates used in training and inference are as follows, where ‘’ represents the reasoning process and ‘’ represents the final answer. + +# Unified Action Space Prompt for Task Training and Inference + +You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot $<$ image $>$ , I want you to continue executing the command task, with the action history being history. Please provide the action to perform (enterprise from [complete, close/delete, press_home, click, press_back, type, select, scroll, enter]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action. + +Output the thinking process in tags, and the final answer in tags as follows: ... [action]: enum[complete, close/delete, press_home, click, press_back, type, select, scroll, enter], 'point': [x, y], 'input_text': 'no input text [default]']. + +# Unified Action Space Prompt for Grounding Training and Inference + +You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot $<$ image $>$ , I want you to continue executing the command task, with the action history being history. Please provide the action to perform (Enumerate from [click]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action. + +Output the thinking process in tags, and the final answer in tags as follows: ... [‘action’: enum(click], ‘point’: [x, y], ‘input_text’: ‘no input text [default]’. + +Accuracy rewards. 
For the model's predicted output $o = \{o^{\mathrm{act}}, o^{\mathrm{text}}, o^{\mathrm{point}}\}$ , which consists of three components: $o^{\mathrm{act}}$ (action type, e.g., click, scroll), $o^{\mathrm{point}}$ (click point position), and $o^{\mathrm{text}}$ (input text), we define the accuracy reward $R_{\mathrm{acc}}$ as a combination of action type reward $R_{\mathrm{act}}$ , click point reward $R_{\mathrm{point}}$ , and input text reward $R_{\mathrm{text}}$ , i.e., $R_{\mathrm{acc}} = R_{\mathrm{act}} + R_{\mathrm{point}} + R_{\mathrm{text}}$ . This design provides reliable correctness rewards for all actions. + +Action type reward. The action type reward $R_{\mathrm{act}}$ is calculated by comparing the predicted action type $o^{\mathrm{act}}$ with the ground truth action type $gt^{\mathrm{act}}$ . If $o^{\mathrm{act}} == gt^{\mathrm{act}}$ , the reward is 1; otherwise, it is 0. This simple yet effective evaluation mechanism guides action type prediction. + +Click point reward. The click point reward $R_{\mathrm{point}}$ is calculated by comparing the predicted click point $o^{\mathrm{point}} = [x, y]$ with the ground truth bounding box $gt^{\mathrm{bbox}} = [x_1, y_1, x_2, y_2]$ . The calculation formula is as follows: + +$$ +R _ {\text {p o i n t}} = \left\{ \begin{array}{l l} 1 & \text {i f} o ^ {\text {p o i n t}} \in g t ^ {\text {b b o x}}, \\ 0 & \text {o t h e r w i s e}. \end{array} \right. +$$ + +Input text reward The input text reward $R_{\mathrm{text}}$ is calculated by comparing the predicted input text $o^{\mathrm{text}}$ with the ground truth text parameter $gt^{\mathrm{text}}$ using the semantic $F_1$ score. The calculation formula is as follows: + +$$ +R _ {\text {t e x t}} = \left\{ \begin{array}{l l} 1 & \text {i f} F _ {1} (o ^ {\text {t e x t}}, g t ^ {\text {t e x t}}) > 0. 5, \\ 0 & \text {o t h e r w i s e .} \end{array} \right. +$$ + +Response reward. 
The final response reward is composed of format rewards and accuracy rewards, defined as: $R_{o} = \alpha R_{\mathrm{f}} + \beta R_{\mathrm{acc}}$ , where $R_{\mathrm{f}}$ represents the format reward, $R_{\mathrm{acc}}$ represents the accuracy reward, and $\alpha$ and $\beta$ are weighting parameters respectively. + +# 3.3 Training Data Curation + +Data collection. We collect data related to GUI tasks from multiple open-source datasets, including FineWeb [21], UIBert [22], AMEX [23], RICOSCA [24], as well as portions of pretraining data from Seeclick [3] and OS-Otlas [1]. This leads to $\sim 14\mathrm{M}$ examples of grounding and low-level task data. Additionally, we collect $\sim 30\mathrm{K}$ high-level GUI data points from OS-Otlas instruction datasets. In total, we gather $\sim 14\mathrm{M}$ examples spanning multiple platforms (including Windows, Linux, MacOS, Android, and Web) and various task granularities (grounding, low-level, and high-level). + +Data filtering. To filter out low-quality data for efficient RFT, we use the Qwen2.5VL-7B [4] model to generate 10 responses for each example and evaluate them using a rule-based reward function designed for unified action space modeling. We remove the problems with an estimated accuracy of 0 or 1 to ensure a stable training process, resulting in 140K low-level data and 1.5K high-level data. Since the quantity of low-level data far exceeds that of high-level data, we randomly sample 1.5K low-level data and combine it with all high-level data to create a balanced dataset of 3K high-quality training examples, named GUI-R1-3K. The distribution of image categories, action types, and corresponding difficulty levels is demonstrated in Figure 3. + +![](images/742f4555af634ff81413e847bb29c4c5604960cfdf6bbadf2119d5b60dcd2306.jpg) +(a) Image category quantity and difficulty distribution. 
+ +![](images/eff777f438fcbcc5a1379031cf20f5447ef49173caac6a932d40410b6c6337c6.jpg) +(b) Action category quantity and difficulty distribution. +Figure 3: Illustrations of image and action category quantity and difficulty distributions in the dataset GUI-R1-3K. + +# 4 Experiments + +# 4.1 Implementation Details + +Training and inference details. For supervised fine-tuning (SFT), we use the QwenVL2.5-3B/7B [4] model as the base model for experiments and employ the LLaMA Factory [25] framework for one epoch of training to avoid overfitting. For RFT, we use the EasyR1 [26] framework for training over nine epochs. During inference, to ensure fairness, we apply a unified and simple prompt across all comparison methods, and conduct experiments under zero-shot prompt configurations. All experiments are conducted using $8 \times$ NVIDIA A100-80G GPUs. + +Evaluation benchmarks. We evaluate our model on eight agent benchmarks on three different platforms, including AndroidControl-Low [27], AndroidControl-High [27], GUI-Odyssey [28], ScreenSpot [3], ScreenSpot-Pro [29], GUI-Act-Web [30], OmniAct-Web [31], and OmniAct-Desktop [31]. We only use the test splits of these benchmarks for evaluation. + +Evaluation metrics. Following Os-Atlas [1], we use three commonly adopted metrics for GUI agents in evaluation: action type prediction accuracy, click point prediction accuracy, and step success rate, denoted as Type, Grounding, and SR, respectively. In more detail, Type measures the exact match score between the predicted action types (e.g., 'click' and 'scroll') and the ground truth. Grounding evaluates the performance of GUI grounding in downstream tasks. Besides, SR represents the step-wise success rate, where a step is deemed successful only if both the predicted action and its associated arguments (e.g., point for click actions and input text for scroll actions) are correct. 
+ +# 4.2 Experimental Results + +We here evaluate our GUI-R1 model by comparing it with current state-of-the-art (SOTA) models on various tasks including GUI grounding tasks, GUI low-level tasks, and GUI high-level tasks. + +Grounding capability. We evaluate the grounding capability of GUI-R1 using ScreenSpot [3] and ScreenSpot-Pro [29]. ScreenSpot assesses GUI grounding performance across mobile, desktop, and web platforms, while ScreenSpot-Pro focuses on high-resolution professional environments, featuring expert-annotated tasks spanning 23 applications, five industries, and three operating systems. + +As shown in Table 1, compared to the previous SOTA model Os-Atlas-7B, which was trained with large-scale data using supervised fine-tuning (SFT), the RFT approach achieves superior performance on the 3B-sized Qwen2.5-VL model using only $0.2\%$ of the data (3K vs. 14M). Furthermore, compared to the base models QwenVL2.5-3B/7B and the SFT-trained QwenVL2.5* 3B/7B models using the same dataset, the RFT-based GUI-R1 demonstrates significantly better performance in GUI grounding tasks. Moreover, at the 3B scale, GUI-R1 achieves substantial gains over SFT models on ScreenSpot (80.08 vs. 63.55) and ScreenSpot-Pro (25.23 vs. 13.80), representing improvements of $26.3\%$ and $82.8\%$ , respectively. This highlights the effectiveness of the RL training framework in leveraging small-scale datasets to achieve significant performance improvements, which demonstrates its potential as a data-efficient and scalable approach for model training in resource-constrained environments. + +Low-level task capability. We evaluate the low-level task execution capability of GUI-R1 using four benchmark datasets: AndroidControl-Low [27], GUI-Act-Web [29], OmniAct-Web, and OmniAct-Desktop [31]. AndroidControl-Low evaluates low-level task execution on mobile platforms, while GUI-Act-Web and OmniAct-Web focus on low-level task execution on web platforms. 
OmniAct-Desktop is used to test low-level task execution on desktop platforms. + +As demonstrated in Table 2, our method impressively improves the success rate of GUI low-level tasks for 3B and 7B models, with the average success rate increasing from 55.65 to 80.88 at the 3B scale. Compared to UI-R1 [9], which is concurrent work also trained using RFT, our model achieves a 10-point improvement at the 3B scale, validating that RL training focused on high-level tasks can further enhance the model's understanding of low-level instructions. Note that an interesting observation is that the use of small-scale SFT data even leads to performance degradation on some metrics such as GR on AndroidControl-Low. This limitation stems from SFT's reliance on task-specific labeled data, which constrains the model's ability to adapt to unseen environments. In contrast, our RFT method not only enhances out-of-distribution (OOD) generalization by optimizing + +Table 1: GUI grounding accuracy on ScreenSpot and ScreenSpot-Pro. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold. + +
ModelsScreenSpot-ProScreenSpot
DevCreativeCADScientificOfficeOSWebDesktop
TextIconTextIconTextIconTextIconTextIconTextIconTextIconText
Supervised Fine-Tuning
SeeClick0.60.01.00.02.50.03.50.01.10.02.80.055.732.572.230.0
Os-Atlas-4B7.10.03.01.42.00.09.05.55.13.85.60.082.663.172.145.7
ShowUI-2B16.91.49.10.02.50.013.27.315.37.510.32.2----
CogAgent-18B14.90.79.60.07.13.122.21.813.00.05.60.070.428.674.220.0
Aria-GUI16.20.023.72.17.61.627.16.420.31.94.70.0----
UGround-7B26.62.127.32.814.21.631.92.731.611.317.80.080.470.482.563.6
Claude**22.03.925.93.414.53.733.915.830.116.311.04.5----
Os-Atlas-7B33.11.428.82.812.24.737.57.333.95.727.14.590.874.291.762.8
QwenVL2.5-3B*20.31.824.62.811.24.739.56.428.65.717.82.273.048.585.746.2
QwenVL2.5-7B*31.41.827.33.515.75.140.77.939.78.932.46.987.868.290.362.8
Zero Shot
QwenVL-7B0.00.00.00.00.00.00.70.00.00.00.00.0----
GPT-4o1.30.01.00.02.00.02.10.01.10.00.00.0----
QwenVL2.5-3B16.21.423.31.410.24.738.26.424.33.815.01.160.843.570.135.0
QwenVL2.5-7B33.12.123.73.512.26.336.87.337.87.530.86.986.965.189.760.0
Reinforcement Fine-Tuning
UI-R1-3B22.74.127.33.511.26.343.411.832.211.313.14.585.273.390.259.3
GUI-R1-3B33.84.840.95.626.47.861.817.353.617.028.15.689.672.193.864.8
GUI-R1-7B49.44.838.98.423.96.355.611.858.726.442.116.991.375.791.873.6
+ +Table 2: GUI low-level task accuracy on GUI-Act-Web, OmniAct-Web, OmniAct-Desktop, and AndroidControl-Low. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold. + +
ModelsGUI-Act-WebOmniAct-WebOmniAct-DesktopAndroidControl-LowOverall
TypeGRSRTypeGRSRTypeGRSRTypeGRSR
Supervised Fine-Tuning
Os-Atlas-4B79.2258.5742.6246.7449.2422.9963.3042.5526.9464.5871.1940.6250.71
Os-Atlas-7B86.9575.6157.0285.6369.3559.1590.2462.8756.7373.0073.3750.9470.07
QwenVL2.5-3B*76.9566.3461.6966.2456.9153.0277.6262.5463.7671.0874.5358.7965.79
QwenVL2.5-7B*87.6684.7779.8981.6273.4573.3986.2380.1779.8084.0085.7464.3280.09
Zero Shot
GPT-4o77.0945.0241.8479.3342.7934.0679.9763.2550.6774.3338.6728.3954.46
QwenVL2.5-3B56.1064.2855.6150.6346.8947.0256.9547.9746.8962.0374.0759.3255.65
QwenVL2.5-7B86.5984.3978.6379.1571.3271.2184.7479.8979.6683.4487.0862.5079.05
Reinforcement Fine-Tuning
UI-R1-3B75.8979.4367.3175.4261.3561.3373.4164.1263.9879.1582.4166.4470.85
GUI-R1-3B89.8687.4276.3188.5875.1075.0891.8678.3778.3183.6881.5964.4180.88
GUI-R1-7B90.8588.0680.3191.1677.2977.3592.2083.3683.3385.1784.0266.5283.30
+ +task-specific rewards but also achieves this with fewer training examples, which provides a scalable and efficient alternative to traditional SFT methods. + +High-level task capability. We evaluate the high-level task execution capability of GUI-R1 using AndroidControl-High [27] and GUI-Odyssey [28]. AndroidControl-High evaluates high-level task execution on mobile platforms, while GUI-Odyssey focuses on cross-app navigation scenarios, featuring high-level tasks spanning six applications and 203 apps. + +As shown in Table 3, due to our unified action space with rule-based reward modeling, GUI-R1 achieves SOTA on high-level GUI tasks. Compared to the closed-source model GPT-4o, our 3B-scale method achieves an absolute improvement of 21.06, demonstrating that RFT, in contrast to SFT, can efficiently and reliably enhance the success rate of GUI agents in real-world tasks. Furthermore, compared to UI-R1 [9], which focuses on improving low-level grounding capabilities, our model achieves an average improvement of 3.4 points at the 3B scale, with a particularly notable + +Table 3: GUI high-level task accuracy on AndroidControl-High and GUI-Odyssey. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold. + +
ModelsAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
Supervised Fine-Tuning
OS-Atlas-4B49.0149.5122.7749.6334.6320.2537.63
OS-Atlas-7B57.4454.9029.8360.4239.7426.9644.88
QwenVL2.5-3B*52.0549.5341.2243.6932.2127.3141.00
QwenVL2.5-7B*69.1558.6948.1156.7838.6534.4450.97
Zero Shot
GPT-4o63.0630.9021.1737.5014.175.3628.69
QwenVL2.5-3B47.8146.5138.9037.4026.4926.6937.30
QwenVL2.5-7B68.6759.7147.0655.6037.7834.3750.53
Reinforcement Fine-Tuning
UI-R1-3B57.8555.7045.4452.1634.4632.4946.35
GUI-R1-3B58.0456.2446.5554.8441.5241.3349.75
GUI-R1-7B71.6365.5651.6765.4943.6438.7956.13
+ +![](images/3a9ff69a46c697dceba16ce549cd2020caec5b420655f3f43a3832c8234e5942.jpg) +Figure 4: Ablation study of image resolution and data quality. + +$27.2\%$ lead in the step success rate on GUI-Odyssey. This indicates that RL training focused on low-level tasks is insufficient for handling complex high-level instructions. RFT designed for high-level tasks is better suited as a direction for developing GUI agent models. + +# 4.3 Ablation Study + +Image resolution and data quality. To investigate the impact of image resolution and data quality on GUI RFT, we conduct corresponding ablation experiments, with the results shown in Figure 4. As observed, when using the filtered GUI-R1-3K dataset, the model requires only a few updates to achieve relatively high rewards. In contrast, training with unfiltered and low-quality data necessitates significantly more training time for the model to converge, with a noticeably lower performance ceiling. To further explore the effect of image resolution on model training, we increase the image resolution to twice its original size (from 1,048,576 pixels to 2,097,152 pixels). As shown in Figure 4, because of the high resolution of GUI task images and the small size of many UI elements, increasing the image resolution allows the model to perceive these elements more clearly, which accelerates the convergence speed of RFT and improves the performance ceiling. + +Coefficients in the reward function. To explore the impact of the coefficients for format rewards and accuracy rewards in the reward function on the final performance, we conduct relevant ablation experiments, as shown in Table 4. The results indicate that reducing the coefficient ratio of format rewards leads to consistent performance improvements. This is because format rewards are easier to learn during training and often converge early in the process. 
By amplifying the accuracy rewards, the advantages of providing correct answers are further emphasized, ultimately leading to more performance improvements. + +Table 4: Ablation study of the coefficient $\alpha$ and $\beta$ in reward function. + +
αβAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
0.20.858.0456.2446.5554.8441.5241.3349.75
0.50.557.9355.9146.6252.7737.4435.6647.72
0.80.257.8555.7045.4452.1634.4632.4946.48
+ +# 4.4 Visualization + +In Figure 5, we provide additional visualization of the training process. As shown in Figure 5a and Figure 5b, it can be observed that the format reward converges quickly in the early stages of training, while the accuracy reward becomes the main source of differentiated rewards in the later stages of training. Furthermore, as illustrated in Figure 5d, the mean response length first decreases and then gradually increases, but the "aha moment" does not occur. This may be due to the single-image input training method in a non-interactive environment, which prevents the model from autonomously tracing back the sequence of incorrect actions. Exploring multi-image high-level tasks in interactive environments could be a potential direction for inducing the emergence of the "aha moment" in future research. + +![](images/e2de6aad495dae889bb09433213fc9eb2b0d78b38f07f8d686d2e22d3d1a9525.jpg) + +![](images/17f917d81e4fa2897b0b75f5c235d52a73e95bc59698049327863f8f4c7e5fdd.jpg) + +![](images/11c1746550af984bf27ac6ab2bfa8bc0572a3d1eff36d53b128703bd9ce95da9.jpg) +(a) Accuracy reward curve with training steps. +(c) PG loss curve with training steps. +Figure 5: Visualization of the training process of GUI-R1. To provide more details, we report the curves of GUI-R1's key metrics during training, including format reward, accuracy reward, mean response length, and policy gradient (PG) loss, as they vary with the training steps. + +![](images/3ccc48d75f9254f3950936c15024e9b372f78d1f4843180668d22b93c0ebadae.jpg) +(b) Format reward curve with training steps. +(d) Mean response length curve with training steps. + +# 5 Conclusion + +This paper presents GUI-R1, which is the first GUI reinforcement learning framework grounded in unified action space rule modeling. By integrating reinforcement fine-tuning with large vision-language models, GUI-R1 enables effective contextual action prediction and verifiable reward-driven learning in GUI environments. 
Extensive experiments demonstrate that GUI-R1 consistently outperforms baselines on various tasks. Moving forward, we plan to extend GUI-R1 to support collaborative multi-agent interaction and robust error correction policies, enabling the system to handle complex tasks with greater scalability. + +# References + +[1] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024. +[2] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025. +[3] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024. +[4] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. +[5] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. +[6] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. +[7] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. +[8] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. 
https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. +[9] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. arXiv preprint arXiv:2503.21620, 2025. +[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +[11] Theodore Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas Griffiths. Cognitive architectures for language agents. Transactions on Machine Learning Research, 2023. +[12] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6):186345, 2024. +[13] Qiushi Sun, Zhangyue Yin, Xiang Li, Zhiyong Wu, Xipeng Qiu, and Lingpeng Kong. Corex: Pushing the boundaries of complex reasoning through multi-model collaboration. arXiv preprint arXiv:2310.00280, 2023. +[14] Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024. +[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024. +[16] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. + +[17] Jiawei Liu and Lingming Zhang. Code-r1: Reproducing r1 for code with reliable rewards. arXiv preprint arXiv:2503.18470, 2025. 
+[18] Zihan Wang*, Kangrui Wang*, Qineng Wang*, Pingyue Zhang*, Linjie Li*, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. Training agents by reinforcing reasoning, 2025. +[19] Zhenyu Pan and Han Liu. Metaspatial: Reinforcing 3d spatial reasoning in vlms for the meta-verse. arXiv preprint arXiv:2503.18470, 2025. +[20] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. +[21] Guilherme Penedo, Hynek Kydlicek, Anton Lozhkov, Margaret Mitchell, Colin A Raffel, Leandro Von Werra, Thomas Wolf, et al. The fineweb datasets: Decanting the web for the finest text data at scale. In NeurIPS, pages 30811-30849, 2024. +[22] Chongyang Bai, Xiaoxue Zang, Ying Xu, Srinivas Sunkara, Abhinav Rastogi, Jindong Chen, and Blaise Aguera y Arcas. Uibert: Learning generic multimodal representations for ui understanding, 2021. +[23] Yuxiang Chai, Siyuan Huang, Yazhe Niu, Han Xiao, Liang Liu, Dingyu Zhang, Peng Gao, Shuai Ren, and Hongsheng Li. Amex: Android multi-annotation expo dataset for mobile gui agents. arXiv preprint arXiv:2407.17490, 2024. +[24] Yang Li, Jiacong He, Xin Zhou, Yuan Zhang, and Jason Baldridge. Mapping natural language instructions to mobile ui action sequences. arXiv preprint arXiv:2005.03776, 2020. +[25] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In ACL, 2024. +[26] Yaowei Zheng, Junting Lu, Shenzhi Wang, and Y Xiong. Easyr1: An efficient, scalable, multimodality rl training framework, 2025. +[27] Wei Li, William Bishop, Alice Li, Chris Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. 
On the effects of data scale on computer control agents. arXiv preprint arXiv:2406.03679, 2024. +[28] Quanfeng Lu, Wenqi Shao, Zitao Liu, Fanqing Meng, Boxuan Li, Botong Chen, Siyuan Huang, Kaipeng Zhang, Yu Qiao, and Ping Luo. Gui odyssey: A comprehensive dataset for cross-app gui navigation on mobile devices. arXiv preprint arXiv:2406.08451, 2024. +[29] Kaixin Li, Ziyang Meng, Hongzhan Lin, Ziyang Luo, Yuchen Tian, Jing Ma, Zhiyong Huang, and Tat-Seng Chua. Screenshot-pro: Gui grounding for professional high-resolution computer use. Workshop on Reasoning and Planning for Large Language Models, 2025. +[30] Wentong Chen, Junbo Cui, Jinyi Hu, Yujia Qin, Junjie Fang, Yue Zhao, Chongyi Wang, Jun Liu, Guirong Chen, Yupeng Huo, et al. Guicourse: From general vision language models to versatile gui agents. arXiv preprint arXiv:2406.11317, 2024. +[31] Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem Al-Shikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In ECCV, pages 161-178. Springer, 2024. 
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10458/images/11c1746550af984bf27ac6ab2bfa8bc0572a3d1eff36d53b128703bd9ce95da9.jpg b/data/2025/2504_10xxx/2504.10458/images/11c1746550af984bf27ac6ab2bfa8bc0572a3d1eff36d53b128703bd9ce95da9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e40761d70f3804a2423f96d67ba417167b1a0a68 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/11c1746550af984bf27ac6ab2bfa8bc0572a3d1eff36d53b128703bd9ce95da9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a7bf13c86d1b12375a8aa0759f47c1a79ab0eff0e396230ff4a652bc08e2ec3 +size 19057 diff --git a/data/2025/2504_10xxx/2504.10458/images/17f917d81e4fa2897b0b75f5c235d52a73e95bc59698049327863f8f4c7e5fdd.jpg b/data/2025/2504_10xxx/2504.10458/images/17f917d81e4fa2897b0b75f5c235d52a73e95bc59698049327863f8f4c7e5fdd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71a3679cd28289e655ce3607720fcae22c8dd71e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/17f917d81e4fa2897b0b75f5c235d52a73e95bc59698049327863f8f4c7e5fdd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecdcefd519bcd8a377385b247367efc568f15ec17af3e316145b5a235fb3c251 +size 13183 diff --git a/data/2025/2504_10xxx/2504.10458/images/3a9ff69a46c697dceba16ce549cd2020caec5b420655f3f43a3832c8234e5942.jpg b/data/2025/2504_10xxx/2504.10458/images/3a9ff69a46c697dceba16ce549cd2020caec5b420655f3f43a3832c8234e5942.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b74c4be0eda698ffcd8c836dc1db425324a56ae --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/3a9ff69a46c697dceba16ce549cd2020caec5b420655f3f43a3832c8234e5942.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f91eea82cf2e460a4294989c5f6d898ff79aaccf77d2cdb1fe39fcbc1e86c1fd +size 23961 diff --git 
a/data/2025/2504_10xxx/2504.10458/images/3ccc48d75f9254f3950936c15024e9b372f78d1f4843180668d22b93c0ebadae.jpg b/data/2025/2504_10xxx/2504.10458/images/3ccc48d75f9254f3950936c15024e9b372f78d1f4843180668d22b93c0ebadae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..094bd1a079c8d67d69c0ea27f8f14b698d909cc2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/3ccc48d75f9254f3950936c15024e9b372f78d1f4843180668d22b93c0ebadae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b58ff74fa9c290093563f11556503efe05d6003e785bb4b0282acce73738661c +size 15859 diff --git a/data/2025/2504_10xxx/2504.10458/images/512270404c63b21f462da324a0f27a925ac93742bbd95f28afe9e2cfeb780bbe.jpg b/data/2025/2504_10xxx/2504.10458/images/512270404c63b21f462da324a0f27a925ac93742bbd95f28afe9e2cfeb780bbe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72a51d8d324c2cd945306f7aa02749c002a8f10c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/512270404c63b21f462da324a0f27a925ac93742bbd95f28afe9e2cfeb780bbe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98f9c23ebdc85d61760dd5579b7bfc6acda64a671fe96c90aeac919375f87d7a +size 20800 diff --git a/data/2025/2504_10xxx/2504.10458/images/677a72e3e956b7483678153e39f5b66f055fc6b45c91e7eb23231ce37ea8b638.jpg b/data/2025/2504_10xxx/2504.10458/images/677a72e3e956b7483678153e39f5b66f055fc6b45c91e7eb23231ce37ea8b638.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04739ce2e5f43aa8cecae7f1e66b6c3892893e1e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/677a72e3e956b7483678153e39f5b66f055fc6b45c91e7eb23231ce37ea8b638.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:375b76f609beda1ffdd42b6557bbcbc41da220cdf0d475f36c39826ce29f5675 +size 5314 diff --git a/data/2025/2504_10xxx/2504.10458/images/6acb2fc7a5adc803fcea522323e025c5759b46399059b10a650a972caa07f27d.jpg 
b/data/2025/2504_10xxx/2504.10458/images/6acb2fc7a5adc803fcea522323e025c5759b46399059b10a650a972caa07f27d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a94f814097a2884eedaed50e43e6f93a93e64be --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/6acb2fc7a5adc803fcea522323e025c5759b46399059b10a650a972caa07f27d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95a74252055a34598775e592f630c60a4e21f4d750d355b1c3b4ff49718416b3 +size 6075 diff --git a/data/2025/2504_10xxx/2504.10458/images/742f4555af634ff81413e847bb29c4c5604960cfdf6bbadf2119d5b60dcd2306.jpg b/data/2025/2504_10xxx/2504.10458/images/742f4555af634ff81413e847bb29c4c5604960cfdf6bbadf2119d5b60dcd2306.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4467faaf5aaa2fb9dffc2e4cd9fd425b1fbb95b0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/742f4555af634ff81413e847bb29c4c5604960cfdf6bbadf2119d5b60dcd2306.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f925378112d5e48319e3815f1a854b874d923af3c5f9e80f111a41081854126d +size 22794 diff --git a/data/2025/2504_10xxx/2504.10458/images/9fc200b1f5a26e06c697f75304bb82ca2b26802b7fc14558a867a6658bcecfc3.jpg b/data/2025/2504_10xxx/2504.10458/images/9fc200b1f5a26e06c697f75304bb82ca2b26802b7fc14558a867a6658bcecfc3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0ab46e039ee92704abb6da2ff4bdd4173eceb0d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/9fc200b1f5a26e06c697f75304bb82ca2b26802b7fc14558a867a6658bcecfc3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24a61ec8e5fd37a4fe01bcc3ba271842b9604a457df97d6236cf2d77a412e0bb +size 59239 diff --git a/data/2025/2504_10xxx/2504.10458/images/a343d8658955f5376bd79dff57d34f2952e7c91622500bc8d7130f3f5cedf694.jpg b/data/2025/2504_10xxx/2504.10458/images/a343d8658955f5376bd79dff57d34f2952e7c91622500bc8d7130f3f5cedf694.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..1c0bc062636b9d03d678b83db61a45fa1736e35c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/a343d8658955f5376bd79dff57d34f2952e7c91622500bc8d7130f3f5cedf694.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:440a3ba35bca5cadfd0c36998de0fb82edd84c4dd556ae4a3b7fba07acdc155d +size 22361 diff --git a/data/2025/2504_10xxx/2504.10458/images/b3b746105ea2ffcd1fb7f8ee095e4365304aba370c8b0e79c09e244405980cdc.jpg b/data/2025/2504_10xxx/2504.10458/images/b3b746105ea2ffcd1fb7f8ee095e4365304aba370c8b0e79c09e244405980cdc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..754b0c758e75c8101859d97d11c929ee66a0170d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/b3b746105ea2ffcd1fb7f8ee095e4365304aba370c8b0e79c09e244405980cdc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:751f0cfab0c048340ca85148f4b52a00f35d813482681dc73b9fa682dd9259bc +size 31099 diff --git a/data/2025/2504_10xxx/2504.10458/images/c3403bda7e5172ff6e766ccea164a53224c7fc4715819b793c1e3cfb633040e8.jpg b/data/2025/2504_10xxx/2504.10458/images/c3403bda7e5172ff6e766ccea164a53224c7fc4715819b793c1e3cfb633040e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e40a50abccfdc2bf7d64a1d164e9f557a83872da --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/c3403bda7e5172ff6e766ccea164a53224c7fc4715819b793c1e3cfb633040e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8811e0892fc8c8b052a540d9510a841b03b884180dc48e0e5d617a3d2cafd050 +size 6838 diff --git a/data/2025/2504_10xxx/2504.10458/images/c6b9af241d50e031e4855976a092383154a95e81b4c574bc8196fcbcc277f33b.jpg b/data/2025/2504_10xxx/2504.10458/images/c6b9af241d50e031e4855976a092383154a95e81b4c574bc8196fcbcc277f33b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..58a9a45acc604f7da8c736b3e4389fc8e1d71a98 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10458/images/c6b9af241d50e031e4855976a092383154a95e81b4c574bc8196fcbcc277f33b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ebd0aef20f4fd736aeeeb800b5d749077628a9d6b11db36bca33da74035adc5 +size 12931 diff --git a/data/2025/2504_10xxx/2504.10458/images/cafd0148da670145984d2db9becf6696e576a880ea6b7afab20c4ab996d6e76f.jpg b/data/2025/2504_10xxx/2504.10458/images/cafd0148da670145984d2db9becf6696e576a880ea6b7afab20c4ab996d6e76f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a06190d6bfa8adcd440fa58be6ece3291c21cbc1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/cafd0148da670145984d2db9becf6696e576a880ea6b7afab20c4ab996d6e76f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61a5c02e823929bd8ed819eecef5410c43077aac87011b65aef6f10fdf8d9264 +size 160033 diff --git a/data/2025/2504_10xxx/2504.10458/images/d0a5c889d078984ece46a418434aaa9819e88f9e9d29655aaa178ba48ae34a91.jpg b/data/2025/2504_10xxx/2504.10458/images/d0a5c889d078984ece46a418434aaa9819e88f9e9d29655aaa178ba48ae34a91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fe6bd50c1cf7889350ec81daf730938ca43fa48 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/d0a5c889d078984ece46a418434aaa9819e88f9e9d29655aaa178ba48ae34a91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de05316341e1cfac4c9570db6f08ad7f6c0b170bd5326793a0292f503f4b584a +size 100410 diff --git a/data/2025/2504_10xxx/2504.10458/images/e2de6aad495dae889bb09433213fc9eb2b0d78b38f07f8d686d2e22d3d1a9525.jpg b/data/2025/2504_10xxx/2504.10458/images/e2de6aad495dae889bb09433213fc9eb2b0d78b38f07f8d686d2e22d3d1a9525.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32e28c475780f9803fdc08f97ca0abbedf4e64fa --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/e2de6aad495dae889bb09433213fc9eb2b0d78b38f07f8d686d2e22d3d1a9525.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6844384f96bb4f01ebb461a6e18aa5c5ed18d8186feac715115fa83b9f8256a9 +size 17686 diff --git a/data/2025/2504_10xxx/2504.10458/images/eff777f438fcbcc5a1379031cf20f5447ef49173caac6a932d40410b6c6337c6.jpg b/data/2025/2504_10xxx/2504.10458/images/eff777f438fcbcc5a1379031cf20f5447ef49173caac6a932d40410b6c6337c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93dd034a11819f89877d12713bf2e2230e9c7782 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/eff777f438fcbcc5a1379031cf20f5447ef49173caac6a932d40410b6c6337c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f9680fd3180b4fe3dc81c01f01737c74099ae5ea19b082c6dbb399c053d9c97 +size 23266 diff --git a/data/2025/2504_10xxx/2504.10458/images/fc5cfb0efb77c23d45296aad426bb409af65cd88b79315371f87476014280469.jpg b/data/2025/2504_10xxx/2504.10458/images/fc5cfb0efb77c23d45296aad426bb409af65cd88b79315371f87476014280469.jpg new file mode 100644 index 0000000000000000000000000000000000000000..961e951c91673399444a503b6199d0ad717d070e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/images/fc5cfb0efb77c23d45296aad426bb409af65cd88b79315371f87476014280469.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:240e1eb63f5c93a08c8600b3a94a325a4aa96d5c834cf113925dec68bc30bab2 +size 111750 diff --git a/data/2025/2504_10xxx/2504.10458/layout.json b/data/2025/2504_10xxx/2504.10458/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d5bdf74a3bb59c53f62756dfdcf45a1af3fa5272 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10458/layout.json @@ -0,0 +1,6503 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 124, + 97, + 487, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 97, + 487, + 138 + ], + "spans": [ + { + "bbox": [ + 124, + 97, + 487, + 138 + ], + "type": "text", + "content": "GUI-R1: A Generalist R1-Style Vision-Language Action Model For GUI 
Agents" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "spans": [ + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "text", + "content": "Run Luo" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "text", + "content": " Lu Wang" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "text", + "content": " Wanwei He" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "text", + "content": " Longze Chen" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "text", + "content": " Jiaming Li" + }, + { + "bbox": [ + 149, + 177, + 459, + 189 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 250, + 190, + 361, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 190, + 361, + 201 + ], + "spans": [ + { + "bbox": [ + 250, + 190, + 361, + 201 + ], + "type": "text", + "content": "Min Yang" + }, + { + "bbox": [ + 250, + 190, + 361, + 201 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 250, + 190, + 361, + 201 + ], + "type": "text", + "content": " Xiaobo Xia" + }, + { + "bbox": [ + 250, + 190, + 361, + 201 + ], + "type": "inline_equation", + "content": "^{3}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 167, + 202, + 444, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 202, + 444, + 213 + ], + "spans": [ + { + "bbox": [ + 167, + 202, + 
444, + 213 + ], + "type": "text", + "content": "1Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 223, + 213, + 387, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 213, + 387, + 224 + ], + "spans": [ + { + "bbox": [ + 223, + 213, + 387, + 224 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 223, + 213, + 387, + 224 + ], + "type": "text", + "content": "University of Chinese Academy of Sciences" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 243, + 224, + 368, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 224, + 368, + 235 + ], + "spans": [ + { + "bbox": [ + 243, + 224, + 368, + 235 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 243, + 224, + 368, + 235 + ], + "type": "text", + "content": "National University of Singapore" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 154, + 235, + 456, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 235, + 456, + 246 + ], + "spans": [ + { + "bbox": [ + 154, + 235, + 456, + 246 + ], + "type": "inline_equation", + "content": "\\left\\{\\mathrm{R.LUO@SIAT.AC.CN}\\right." 
+ }, + { + "bbox": [ + 154, + 235, + 456, + 246 + ], + "type": "text", + "content": " M.YANG@SIAT.AC.CN XIAOBOXIA.UNI@GMAIL.COM}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 281, + 274, + 329, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 274, + 329, + 286 + ], + "spans": [ + { + "bbox": [ + 281, + 274, + 329, + 286 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 298, + 469, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 298, + 469, + 529 + ], + "spans": [ + { + "bbox": [ + 140, + 298, + 469, + 529 + ], + "type": "text", + "content": "Existing efforts in building graphical user interface (GUI) agents largely rely on the training paradigm of supervised fine-tuning (SFT) on large vision-language models (LVLMs). However, this approach not only demands extensive amounts of training data but also struggles to effectively understand GUI screenshots and generalize to unseen interfaces. The issue significantly limits its application in real-world scenarios, especially for high-level tasks. Inspired by reinforcement fine-tuning (RFT) in large reasoning models (e.g., DeepSeek-R1), which efficiently enhances the problem-solving capabilities of large language models in real-world settings, we propose GUI-R1, the first reinforcement learning framework designed to enhance the GUI capabilities of LVLMs in high-level real-world task scenarios, through unified action space rule modeling. 
By leveraging a small amount of carefully curated high-quality data across multiple platforms (including Windows, Linux, MacOS, Android, and Web) and employing policy optimization algorithms such as group relative policy optimization (GRPO) to update the model, GUI-R1 achieves superior performance using only " + }, + { + "bbox": [ + 140, + 298, + 469, + 529 + ], + "type": "inline_equation", + "content": "0.02\\%" + }, + { + "bbox": [ + 140, + 298, + 469, + 529 + ], + "type": "text", + "content": " of the data (3K vs. 13M) compared to previous state-of-the-art methods like OS-Atlas across eight benchmarks spanning three different platforms (mobile, desktop, and web). These results demonstrate the immense potential of reinforcement learning based on unified action space rule modeling in improving the execution capabilities of LVLMs for real-world GUI agent tasks. The codebase is available at https://github.com/ritzzz-ai/GUI-R1.git." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 552, + 192, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 552, + 192, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 192, + 565 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 577, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 577, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 504, + 677 + ], + "type": "text", + "content": "Recent studies [1; 2; 3] have explored the use of large vision-language models (LVLMs) [4] to develop graphical user interface (GUI) agents capable of performing high-level complex tasks. These agents analyze the screen as a self-contained source of information for decision-making, without relying on environment-based textual descriptions such as HTML or accessibility trees. This approach offers greater flexibility in agent decision-making. 
However, previous works have predominantly relied on the training paradigm of supervised fine-tuning (SFT), which not only requires large amounts of high-quality training data but also struggles to effectively comprehend GUI screenshots and generalize to unseen interfaces. These limitations have significantly hindered the real-world applicability of these works, particularly for high-level GUI tasks that lack explicit step-by-step instructions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 681, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 504, + 715 + ], + "type": "text", + "content": "Rule-based reinforcement fine-tuning has recently emerged as an efficient and scalable alternative to SFT, requiring only a small number of examples to fine-tune models effectively while demonstrating strong performance and generalization capabilities in domain-specific tasks. RFT has been increas" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 224, + 35, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 224, + 35, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 224, + 35, + 563 + ], + "type": "text", + "content": "arXiv:2504.10458v4 [cs.CV] 1 Oct 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 731, + 168, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 168, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 168, + 742 + ], + "type": "text", + "content": "Technical report." 
+ } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 236, + 199 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 236, + 199 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 236, + 199 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 236, + 199 + ], + "type": "image", + "image_path": "a343d8658955f5376bd79dff57d34f2952e7c91622500bc8d7130f3f5cedf694.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 202, + 217, + 214 + ], + "lines": [ + { + "bbox": [ + 124, + 202, + 217, + 214 + ], + "spans": [ + { + "bbox": [ + 124, + 202, + 217, + 214 + ], + "type": "text", + "content": "(a) Grounding capability." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 241, + 76, + 370, + 200 + ], + "blocks": [ + { + "bbox": [ + 241, + 76, + 370, + 200 + ], + "lines": [ + { + "bbox": [ + 241, + 76, + 370, + 200 + ], + "spans": [ + { + "bbox": [ + 241, + 76, + 370, + 200 + ], + "type": "image", + "image_path": "512270404c63b21f462da324a0f27a925ac93742bbd95f28afe9e2cfeb780bbe.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 251, + 202, + 359, + 214 + ], + "lines": [ + { + "bbox": [ + 251, + 202, + 359, + 214 + ], + "spans": [ + { + "bbox": [ + 251, + 202, + 359, + 214 + ], + "type": "text", + "content": "(b) Low-level task capability." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 376, + 72, + 504, + 198 + ], + "blocks": [ + { + "bbox": [ + 376, + 72, + 504, + 198 + ], + "lines": [ + { + "bbox": [ + 376, + 72, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 376, + 72, + 504, + 198 + ], + "type": "image", + "image_path": "c6b9af241d50e031e4855976a092383154a95e81b4c574bc8196fcbcc277f33b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 385, + 202, + 494, + 214 + ], + "lines": [ + { + "bbox": [ + 385, + 202, + 494, + 214 + ], + "spans": [ + { + "bbox": [ + 385, + 202, + 494, + 214 + ], + "type": "text", + "content": "(c) High-level task capability." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "type": "text", + "content": "Figure 1: GUI-R1 achieves the best performance on eight evaluation datasets covering various platforms and task granularities, demonstrating the promising potential of RFT in GUI agent tasks." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 263, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 263, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 506, + 373 + ], + "type": "text", + "content": "ingly adopted for developing various LVLMs [5; 6; 7; 8; 9]. Inspired by these advancements, this study extends the rule-based reinforcement learning (RL) paradigm to the domain of GUI agents, which focuses on GUI action prediction tasks within a unified action space driven by high-level instructions. Specifically, LVLMs generate multiple responses (trajectories) for each input, containing both reasoning traces and final answers. 
These responses are evaluated using a unified action space reward function designed in this work, and the model is updated through policy optimization [10]. This iterative self-learning process enhances the model's reasoning capabilities in action prediction and its generalization to out-of-distribution (OOD) scenarios. By modeling a unified action space, we efficiently curate high-quality data spanning multiple platforms, including Windows, Linux, MacOS, Android, and Web, while avoiding action prediction conflicts across different platforms." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 378, + 504, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 504, + 434 + ], + "type": "text", + "content": "As demonstrated in Figure 1, the proposed framework (GUI-R1) achieves superior performance using only " + }, + { + "bbox": [ + 104, + 378, + 504, + 434 + ], + "type": "inline_equation", + "content": "0.02\\%" + }, + { + "bbox": [ + 104, + 378, + 504, + 434 + ], + "type": "text", + "content": " of the data (3K vs. 13M) compared to previous state-of-the-art methods like OSAtlas [1] across eight benchmarks covering three different platforms (mobile, desktop, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks). Before delving into details, we clearly emphasize our contribution as follows." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 441, + 504, + 596 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 132, + 441, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 441, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 132, + 441, + 504, + 464 + ], + "type": "text", + "content": "- We propose GUI-R1, the first framework that utilizes rule-based reinforcement fine-tuning to enhance the reasoning capabilities of LVLMs in high-level GUI action prediction tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 468, + 504, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 468, + 504, + 501 + ], + "spans": [ + { + "bbox": [ + 132, + 468, + 504, + 501 + ], + "type": "text", + "content": "- We design a rule-based unified action space reward function, which efficiently validates GUI task responses across different platforms and task granularities. This ensures reliable and efficient data selection and model training." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 504, + 504, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 504, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 132, + 504, + 504, + 538 + ], + "type": "text", + "content": "- Leveraging the rule-based unified action space reward function, we construct GUI-R1-3K, which is a high-quality fine-tuning dataset with diversity and complexity. This dataset significantly improves both training efficiency and model performance." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 541, + 504, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 541, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 132, + 541, + 504, + 596 + ], + "type": "text", + "content": "- We conduct a comprehensive evaluation of GUI agents, covering three distinct platforms (desktop, mobile, and web) and three levels of task granularity (low-level grounding, low-level tasks, and high-level tasks) across eight benchmarks. Experimental results demonstrate that our GUI-R1 is leading in multiple realistic cases. This creates a strong baseline of GUI agents for future research." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 611, + 197, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 197, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 197, + 624 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 635, + 181, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 181, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 181, + 647 + ], + "type": "text", + "content": "2.1 GUI Agents" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 655, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 723 + ], + "type": "text", + "content": "Autonomous agents driven by large foundation models (e.g., large language models (LLMs) and large vision-language models (LVLMs)) have gained significant attention for their powerful interactive capabilities [11]. These operating systems via programs or API calls [12; 13]. However, the closed-source nature of most commercial software limits access to internal APIs or code, which promotes a shift in research toward GUI agents. 
Different from traditional programmatic agents, GUI agents simulate human interactions via mouse and keyboard inputs, which enable broader flexibility" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 182 + ], + "type": "text", + "content": "in solving complex tasks. Recent works have advanced this direction. For instance, UGround [14] developed a specialized GUI grounding model for precise GUI element localization. OS-Atlas [1] introduced large action models to handle general agent tasks by interpreting human intentions and predicting actions in the form of function calls. UITars [2] proposed a more comprehensive method by combining GUI-related pretraining with task-level reasoning fine-tuning to better capture the complexity of GUI interactions. Nevertheless, these methods all rely on the paradigm of supervised fine-tuning (SFT), which suffers from two main limitations: (1) the training process requires vast amounts of diverse data; (2) the models exhibit limited generalization capabilities, which struggle to understand GUI screenshots and adapt to unseen interfaces. These limitations motivate the development of a more advanced learning paradigm for GUI agents beyond traditional SFT methods." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 195, + 248, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 195, + 248, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 195, + 248, + 208 + ], + "type": "text", + "content": "2.2 Reinforcement Fine-Tuning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 216, + 504, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 358 + ], + "type": "text", + "content": "Rule-based reinforcement fine-tuning, exemplified by OpenAI o1 [15] and DeepSeek-R1 [10], has demonstrated strong performance in mathematical reasoning [16], code generation [17], and multi-step logic tasks [18]. Subsequent studies have extended this paradigm to multimodal models by designing task-specific reward functions for vision-based tasks, such as correct class prediction in image classificati [19; 7; 20], intersection-over-union (IoU) metrics in image localization and detection [6; 5], and accurate click position prediction in low-level GUI grounding tasks [9]. These works demonstrate that verifiable reward signals, e.g., symbolic correctness or execution-based feedback, can effectively substitute traditional supervision. Despite the strong potential of RFT in various tasks, it remains underexplored in complex high-level GUI agent tasks. Compared to other domains, building intelligent agents for high-level GUI tasks is particularly challenging due to diverse UI layouts, implicit task semantics, and long-horizon action dependencies. This imposes higher demands on the model's contextual learning and understanding capabilities. To the best of our knowledge, GUI-R1 is the first RFT-based framework specifically designed for high-level GUI agents." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 374, + 231, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 231, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 231, + 387 + ], + "type": "text", + "content": "3 GUI-R1 Framework" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 110, + 404, + 500, + 544 + ], + "blocks": [ + { + "bbox": [ + 110, + 404, + 500, + 544 + ], + "lines": [ + { + "bbox": [ + 110, + 404, + 500, + 544 + ], + "spans": [ + { + "bbox": [ + 110, + 404, + 500, + 544 + ], + "type": "image", + "image_path": "9fc200b1f5a26e06c697f75304bb82ca2b26802b7fc14558a867a6658bcecfc3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 552, + 504, + 597 + ], + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 597 + ], + "type": "text", + "content": "Figure 2: Overview of the GUI-R1 Framework. Given the high-level instruction, action history, and visual image inputs, the policy model generates multiple responses containing reasoning steps. Then the verifiable rewards, such as action type reward, click point reward, and input text reward, are used with the policy gradient optimization algorithm to update the policy model." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 610, + 504, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 610, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 610, + 504, + 667 + ], + "type": "text", + "content": "GUI-R1 is based on a reinforcement learning training paradigm designed to enhance the ability of GUI agents to complete sophisticated instructional tasks. 
As shown in Figure 2, unlike low-level tasks, high-level GUI tasks lack explicit and fine-grained instructions, which require action predictions based on high-level task objectives and execution history. This imposes greater demands on the model's contextual learning and understanding capabilities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 679, + 188, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 188, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 188, + 690 + ], + "type": "text", + "content": "3.1 Preliminaries" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "We define the goal of GUI agents in high-level instructional tasks as understanding and executing low-level instructions to complete the high-level task " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": ", based on the current interface image " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "I" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "and 
the execution history " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ". Formally, given the input " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", the model generates a set of candidate responses " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "O = \\{o_1, o_2, \\dots, o_N\\}" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", where each response contains attributes of the predicted low-level action " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{act}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", input text " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{text}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", and input point " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{point}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ". 
Each response is evaluated using a unified action space reward function to compute its reward " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "\\{r_1, r_2, \\dots, r_N\\}" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ". GRPO [10] is applied to estimate advantages and update the policy model under KL divergence constraints. The relative advantage " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "A_i" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": " of the " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "-th response is calculated as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 231, + 152, + 378, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 152, + 378, + 178 + ], + "spans": [ + { + "bbox": [ + 231, + 152, + 378, + 178 + ], + "type": "interline_equation", + "content": "A _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} (\\{r _ {1} , r _ {2} , \\ldots , r _ {N} \\})}{\\operatorname {s t d} (\\{r _ {1} , r _ {2} , \\ldots , r _ {N} \\})},", + "image_path": "c3403bda7e5172ff6e766ccea164a53224c7fc4715819b793c1e3cfb633040e8.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 184, + 465, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 184, + 465, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 184, + 465, + 196 + ], + "type": "text", + "content": "where mean and std denote the mean and standard deviation of the rewards, respectively." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 207, + 315, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 207, + 315, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 315, + 220 + ], + "type": "text", + "content": "3.2 Verifiable Rewards in Unified Action Space" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 228, + 504, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 504, + 293 + ], + "type": "text", + "content": "We adopt a unified action space modeling strategy, which extracts action space categories across different platforms and integrates them into a unified action space. This ensures that all high-level instructions can be decomposed into a sequence of atomic actions, resolving action space conflicts in multi-platform data joint training. Based on the unified action space, we design verifiable reward functions to evaluate the accuracy of predicted actions to guide reinforcement learning. We detail these verifiable rewards below." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 299, + 504, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 299, + 504, + 375 + ], + "spans": [ + { + "bbox": [ + 104, + 299, + 504, + 375 + ], + "type": "text", + "content": "Format reward. Following previous work [20; 10; 6], we introduce format rewards during training to evaluate whether the generated output adheres to the expected structural format, including both syntactic and semantic validity. Specifically, format rewards guide the model to generate reasoning processes and final answers in a structured format, which play a critical role in self-learning and iterative improvement during reinforcement fine-tuning. The format reward templates used in training and inference are as follows, where ‘’ represents the reasoning process and ‘’ represents the final answer." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 125, + 384, + 373, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 384, + 373, + 395 + ], + "spans": [ + { + "bbox": [ + 125, + 384, + 373, + 395 + ], + "type": "text", + "content": "Unified Action Space Prompt for Task Training and Inference" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 120, + 400, + 490, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 400, + 490, + 465 + ], + "spans": [ + { + "bbox": [ + 120, + 400, + 490, + 465 + ], + "type": "text", + "content": "You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot " + }, + { + "bbox": [ + 120, + 400, + 490, + 465 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 120, + 400, + 490, + 465 + ], + "type": "text", + "content": " image " + }, + { + "bbox": [ + 120, + 400, + 490, + 465 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 120, + 400, + 490, + 465 + ], + "type": "text", + "content": ", I want you to continue executing the command task, with the action history being history. Please provide the action to perform (enterprise from [complete, close/delete, press_home, click, press_back, type, select, scroll, enter]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 466, + 490, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 466, + 490, + 522 + ], + "spans": [ + { + "bbox": [ + 120, + 466, + 490, + 522 + ], + "type": "text", + "content": "Output the thinking process in tags, and the final answer in tags as follows: ... [action]: enum[complete, close/delete, press_home, click, press_back, type, select, scroll, enter], 'point': [x, y], 'input_text': 'no input text [default]']." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 125, + 536, + 399, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 536, + 399, + 548 + ], + "spans": [ + { + "bbox": [ + 125, + 536, + 399, + 548 + ], + "type": "text", + "content": "Unified Action Space Prompt for Grounding Training and Inference" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 120, + 553, + 490, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 553, + 490, + 608 + ], + "spans": [ + { + "bbox": [ + 120, + 553, + 490, + 608 + ], + "type": "text", + "content": "You are GUI-R1, a reasoning GUI Agent Assistant. In this UI screenshot " + }, + { + "bbox": [ + 120, + 553, + 490, + 608 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 120, + 553, + 490, + 608 + ], + "type": "text", + "content": " image " + }, + { + "bbox": [ + 120, + 553, + 490, + 608 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 120, + 553, + 490, + 608 + ], + "type": "text", + "content": ", I want you to continue executing the command task, with the action history being history. Please provide the action to perform (Enumerate from [click]), the point where the cursor is moved to (integer) if a click is performed, and any input text required to complete the action." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 609, + 490, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 609, + 490, + 654 + ], + "spans": [ + { + "bbox": [ + 120, + 609, + 490, + 654 + ], + "type": "text", + "content": "Output the thinking process in tags, and the final answer in tags as follows: ... [‘action’: enum(click], ‘point’: [x, y], ‘input_text’: ‘no input text [default]’." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": "Accuracy rewards. For the model's predicted output " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "o = \\{o^{\\mathrm{act}}, o^{\\mathrm{text}}, o^{\\mathrm{point}}\\}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": ", which consists of three components: " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{act}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": " (action type, e.g., click, scroll), " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{point}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": " (click point position), and " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{text}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": " (input text), we define the accuracy reward " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{acc}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": " as a combination of action type reward " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{act}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": ", click point reward " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{point}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": 
", and input text reward " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{text}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{acc}} = R_{\\mathrm{act}} + R_{\\mathrm{point}} + R_{\\mathrm{text}}" + }, + { + "bbox": [ + 104, + 666, + 504, + 721 + ], + "type": "text", + "content": ". This design provides reliable correctness rewards for all actions." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "Action type reward. The action type reward " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{act}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " is calculated by comparing the predicted action type " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{act}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " with the ground truth action type " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "gt^{\\mathrm{act}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": ". 
If " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{act}} == gt^{\\mathrm{act}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": ", the reward is 1; otherwise, it is 0. This simple yet effective evaluation mechanism guides action type prediction." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "text", + "content": "Click point reward. The click point reward " + }, + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{point}}" + }, + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "text", + "content": " is calculated by comparing the predicted click point " + }, + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{point}} = [x, y]" + }, + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "text", + "content": " with the ground truth bounding box " + }, + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "inline_equation", + "content": "gt^{\\mathrm{bbox}} = [x_1, y_1, x_2, y_2]" + }, + { + "bbox": [ + 104, + 110, + 505, + 145 + ], + "type": "text", + "content": ". The calculation formula is as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 240, + 163, + 369, + 190 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 163, + 369, + 190 + ], + "spans": [ + { + "bbox": [ + 240, + 163, + 369, + 190 + ], + "type": "interline_equation", + "content": "R _ {\\text {p o i n t}} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} o ^ {\\text {p o i n t}} \\in g t ^ {\\text {b b o x}}, \\\\ 0 & \\text {o t h e r w i s e}. 
\\end{array} \\right.", + "image_path": "677a72e3e956b7483678153e39f5b66f055fc6b45c91e7eb23231ce37ea8b638.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "text", + "content": "Input text reward The input text reward " + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{text}}" + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "text", + "content": " is calculated by comparing the predicted input text " + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "inline_equation", + "content": "o^{\\mathrm{text}}" + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "text", + "content": " with the ground truth text parameter " + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "inline_equation", + "content": "gt^{\\mathrm{text}}" + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "text", + "content": " using the semantic " + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "inline_equation", + "content": "F_1" + }, + { + "bbox": [ + 104, + 202, + 504, + 236 + ], + "type": "text", + "content": " score. The calculation formula is as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 227, + 254, + 381, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 254, + 381, + 281 + ], + "spans": [ + { + "bbox": [ + 227, + 254, + 381, + 281 + ], + "type": "interline_equation", + "content": "R _ {\\text {t e x t}} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} F _ {1} (o ^ {\\text {t e x t}}, g t ^ {\\text {t e x t}}) > 0. 
5, \\\\ 0 & \\text {o t h e r w i s e .} \\end{array} \\right.", + "image_path": "6acb2fc7a5adc803fcea522323e025c5759b46399059b10a650a972caa07f27d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": "Response reward. The final response reward is composed of format rewards and accuracy rewards, defined as: " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "R_{o} = \\alpha R_{\\mathrm{f}} + \\beta R_{\\mathrm{acc}}" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{f}}" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": " represents the format reward, " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{acc}}" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": " represents the accuracy reward, and " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": " are weighting parameters respectively." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 342, + 231, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 231, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 231, + 355 + ], + "type": "text", + "content": "3.3 Training Data Curation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "text", + "content": "Data collection. We collect data related to GUI tasks from multiple open-source datasets, including FineWeb [21], UIBert [22], AMEX [23], RICOSCA [24], as well as portions of pretraining data from Seeclick [3] and OS-Otlas [1]. This leads to " + }, + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\sim 14\\mathrm{M}" + }, + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "text", + "content": " examples of grounding and low-level task data. Additionally, we collect " + }, + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\sim 30\\mathrm{K}" + }, + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "text", + "content": " high-level GUI data points from OS-Otlas instruction datasets. In total, we gather " + }, + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\sim 14\\mathrm{M}" + }, + { + "bbox": [ + 104, + 363, + 504, + 430 + ], + "type": "text", + "content": " examples spanning multiple platforms (including Windows, Linux, MacOS, Android, and Web) and various task granularities (grounding, low-level, and high-level)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 434, + 505, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 505, + 523 + ], + "type": "text", + "content": "Data filtering. To filter out low-quality data for efficient RFT, we use the Qwen2.5VL-7B [4] model to generate 10 responses for each example and evaluate them using a rule-based reward function designed for unified action space modeling. We remove the problems with an estimated accuracy of 0 or 1 to ensure a stable training process, resulting in 140K low-level data and 1.5K high-level data. Since the quantity of low-level data far exceeds that of high-level data, we randomly sample 1.5K low-level data and combine it with all high-level data to create a balanced dataset of 3K high-quality training examples, named GUI-R1-3K. The distribution of image categories, action types, and corresponding difficulty levels is demonstrated in Figure 3." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 108, + 536, + 299, + 664 + ], + "blocks": [ + { + "bbox": [ + 108, + 536, + 299, + 664 + ], + "lines": [ + { + "bbox": [ + 108, + 536, + 299, + 664 + ], + "spans": [ + { + "bbox": [ + 108, + 536, + 299, + 664 + ], + "type": "image", + "image_path": "742f4555af634ff81413e847bb29c4c5604960cfdf6bbadf2119d5b60dcd2306.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 672, + 282, + 681 + ], + "lines": [ + { + "bbox": [ + 126, + 672, + 282, + 681 + ], + "spans": [ + { + "bbox": [ + 126, + 672, + 282, + 681 + ], + "type": "text", + "content": "(a) Image category quantity and difficulty distribution." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 536, + 503, + 665 + ], + "blocks": [ + { + "bbox": [ + 309, + 536, + 503, + 665 + ], + "lines": [ + { + "bbox": [ + 309, + 536, + 503, + 665 + ], + "spans": [ + { + "bbox": [ + 309, + 536, + 503, + 665 + ], + "type": "image", + "image_path": "eff777f438fcbcc5a1379031cf20f5447ef49173caac6a932d40410b6c6337c6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 326, + 672, + 485, + 681 + ], + "lines": [ + { + "bbox": [ + 326, + 672, + 485, + 681 + ], + "spans": [ + { + "bbox": [ + 326, + 672, + 485, + 681 + ], + "type": "text", + "content": "(b) Action category quantity and difficulty distribution." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 687, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 709 + ], + "type": "text", + "content": "Figure 3: Illustrations of image and action category quantity and difficulty distributions in the dataset GUI-R1-3K." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 99, + 231, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 99, + 231, + 111 + ], + "spans": [ + { + "bbox": [ + 105, + 99, + 231, + 111 + ], + "type": "text", + "content": "4.1 Implementation Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 122, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 122, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 122, + 504, + 189 + ], + "type": "text", + "content": "Training and inference details. For supervised fine-tuning (SFT), we use the QwenVL2.5-3B/7B [4] model as the base model for experiments and employ the LLaMA Factory [25] framework for one epoch of training to avoid overfitting. For RFT, we use the EasyR1 [26] framework for training over nine epochs. During inference, to ensure fairness, we apply a unified and simple prompt across all comparison methods, and conduct experiments under zero-shot prompt configurations. 
All experiments are conducted using " + }, + { + "bbox": [ + 104, + 122, + 504, + 189 + ], + "type": "inline_equation", + "content": "8 \\times" + }, + { + "bbox": [ + 104, + 122, + 504, + 189 + ], + "type": "text", + "content": " NVIDIA A100-80G GPUs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 193, + 504, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 504, + 238 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 504, + 238 + ], + "type": "text", + "content": "Evaluation benchmarks. We evaluate our model on eight agent benchmarks on three different platforms, including AndroidControl-Low [27], AndroidControl-High [27], GUI-Odyssey [28], ScreenSpot [3], ScreenSpot-Pro [29], GUI-Act-Web [30], OmniAct-Web [31], and OmniAct-Desktop [31]. We only use the test splits of these benchmarks for evaluation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 242, + 505, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 242, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 104, + 242, + 505, + 320 + ], + "type": "text", + "content": "Evaluation metrics. Following Os-Atlas [1], we use three commonly adopted metrics for GUI agents in evaluation: action type prediction accuracy, click point prediction accuracy, and step success rate, denoted as Type, Grounding, and SR, respectively. In more detail, Type measures the exact match score between the predicted action types (e.g., 'click' and 'scroll') and the ground truth. Grounding evaluates the performance of GUI grounding in downstream tasks. Besides, SR represents the step-wise success rate, where a step is deemed successful only if both the predicted action and its associated arguments (e.g., point for click actions and input text for scroll actions) are correct." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 338, + 222, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 222, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 222, + 350 + ], + "type": "text", + "content": "4.2 Experimental Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 361, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 384 + ], + "type": "text", + "content": "We here evaluate our GUI-R1 model by comparing it with current state-of-the-art (SOTA) models on various tasks including GUI grounding tasks, GUI low-level tasks, and GUI high-level tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 388, + 504, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 434 + ], + "type": "text", + "content": "Grounding capability. We evaluate the grounding capability of GUI-R1 using ScreenSpot [3] and ScreenSpot-Pro [29]. ScreenSpot assesses GUI grounding performance across mobile, desktop, and web platforms, while ScreenSpot-Pro focuses on high-resolution professional environments, featuring expert-annotated tasks spanning 23 applications, five industries, and three operating systems." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "text", + "content": "As shown in Table 1, compared to the previous SOTA model Os-Atlas-7B, which was trained with large-scale data using supervised fine-tuning (SFT), the RFT approach achieves superior performance on the 3B-sized Qwen2.5-VL model using only " + }, + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "inline_equation", + "content": "0.2\\%" + }, + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "text", + "content": " of the data (3K vs. 14M). Furthermore, compared to the base models QwenVL2.5-3B/7B and the SFT-trained QwenVL2.5* 3B/7B models using the same dataset, the RFT-based GUI-R1 demonstrates significantly better performance in GUI grounding tasks. Moreover, at the 3B scale, GUI-R1 achieves substantial gains over SFT models on ScreenSpot (80.08 vs. 63.55) and ScreenSpot-Pro (25.23 vs. 13.80), representing improvements of " + }, + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "inline_equation", + "content": "26.3\\%" + }, + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "inline_equation", + "content": "82.8\\%" + }, + { + "bbox": [ + 104, + 437, + 505, + 559 + ], + "type": "text", + "content": ", respectively. This highlights the effectiveness of the RL training framework in leveraging small-scale datasets to achieve significant performance improvements, which demonstrates its potential as a data-efficient and scalable approach for model training in resource-constrained environments." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 563, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 504, + 619 + ], + "type": "text", + "content": "Low-level task capability. We evaluate the low-level task execution capability of GUI-R1 using four benchmark datasets: AndroidControl-Low [27], GUI-Act-Web [29], OmniAct-Web, and OmniAct-Desktop [31]. AndroidControl-Low evaluates low-level task execution on mobile platforms, while GUI-Act-Web and OmniAct-Web focus on low-level task execution on web platforms. OmniAct-Desktop is used to test low-level task execution on desktop platforms." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 623, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 505, + 723 + ], + "type": "text", + "content": "As demonstrated in Table 2, our method impressively improves the success rate of GUI low-level tasks for 3B and 7B models, with the average success rate increasing from 55.65 to 80.88 at the 3B scale. Compared to UI-R1 [9], which is concurrent work also trained using RFT, our model achieves a 10-point improvement at the 3B scale, validating that RL training focused on high-level tasks can further enhance the model's understanding of low-level instructions. Note that an interesting observation is that the use of small-scale SFT data even leads to performance degradation on some metrics such as GR on AndroidControl-Low. This limitation stems from SFT's reliance on task-specific labeled data, which constrains the model's ability to adapt to unseen environments. 
In contrast, our RFT method not only enhances out-of-distribution (OOD) generalization by optimizing" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 110, + 504, + 340 + ], + "blocks": [ + { + "bbox": [ + 105, + 77, + 504, + 109 + ], + "lines": [ + { + "bbox": [ + 105, + 77, + 504, + 109 + ], + "spans": [ + { + "bbox": [ + 105, + 77, + 504, + 109 + ], + "type": "text", + "content": "Table 1: GUI grounding accuracy on ScreenSpot and ScreenSpot-Pro. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 110, + 504, + 340 + ], + "lines": [ + { + "bbox": [ + 106, + 110, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 504, + 340 + ], + "type": "table", + "html": "
ModelsScreenSpot-ProScreenSpot
DevCreativeCADScientificOfficeOSWebDesktop
TextIconTextIconTextIconTextIconTextIconTextIconTextIconText
Supervised Fine-Tuning
SeeClick0.60.01.00.02.50.03.50.01.10.02.80.055.732.572.230.0
Os-Atlas-4B7.10.03.01.42.00.09.05.55.13.85.60.082.663.172.145.7
ShowUI-2B16.91.49.10.02.50.013.27.315.37.510.32.2----
CogAgent-18B14.90.79.60.07.13.122.21.813.00.05.60.070.428.674.220.0
Aria-GUI16.20.023.72.17.61.627.16.420.31.94.70.0----
UGround-7B26.62.127.32.814.21.631.92.731.611.317.80.080.470.482.563.6
Claude**22.03.925.93.414.53.733.915.830.116.311.04.5----
Os-Atlas-7B33.11.428.82.812.24.737.57.333.95.727.14.590.874.291.762.8
QwenVL2.5-3B*20.31.824.62.811.24.739.56.428.65.717.82.273.048.585.746.2
QwenVL2.5-7B*31.41.827.33.515.75.140.77.939.78.932.46.987.868.290.362.8
Zero Shot
QwenVL-7B0.00.00.00.00.00.00.70.00.00.00.00.0----
GPT-4o1.30.01.00.02.00.02.10.01.10.00.00.0----
QwenVL2.5-3B16.21.423.31.410.24.738.26.424.33.815.01.160.843.570.135.0
QwenVL2.5-7B33.12.123.73.512.26.336.87.337.87.530.86.986.965.189.760.0
Reinforcement Fine-Tuning
UI-R1-3B22.74.127.33.511.26.343.411.832.211.313.14.585.273.390.259.3
GUI-R1-3B33.84.840.95.626.47.861.817.353.617.028.15.689.672.193.864.8
GUI-R1-7B49.44.838.98.423.96.355.611.858.726.442.116.991.375.791.873.6
", + "image_path": "cafd0148da670145984d2db9becf6696e576a880ea6b7afab20c4ab996d6e76f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 393, + 504, + 556 + ], + "blocks": [ + { + "bbox": [ + 105, + 360, + 504, + 392 + ], + "lines": [ + { + "bbox": [ + 105, + 360, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 504, + 392 + ], + "type": "text", + "content": "Table 2: GUI low-level task accuracy on GUI-Act-Web, OmniAct-Web, OmniAct-Desktop, and AndroidControl-Low. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 393, + 504, + 556 + ], + "lines": [ + { + "bbox": [ + 106, + 393, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 106, + 393, + 504, + 556 + ], + "type": "table", + "html": "
ModelsGUI-Act-WebOmniAct-WebOmniAct-DesktopAndroidControl-LowOverall
TypeGRSRTypeGRSRTypeGRSRTypeGRSR
Supervised Fine-Tuning
Os-Atlas-4B79.2258.5742.6246.7449.2422.9963.3042.5526.9464.5871.1940.6250.71
Os-Atlas-7B86.9575.6157.0285.6369.3559.1590.2462.8756.7373.0073.3750.9470.07
QwenVL2.5-3B*76.9566.3461.6966.2456.9153.0277.6262.5463.7671.0874.5358.7965.79
QwenVL2.5-7B*87.6684.7779.8981.6273.4573.3986.2380.1779.8084.0085.7464.3280.09
Zero Shot
GPT-4o77.0945.0241.8479.3342.7934.0679.9763.2550.6774.3338.6728.3954.46
QwenVL2.5-3B56.1064.2855.6150.6346.8947.0256.9547.9746.8962.0374.0759.3255.65
QwenVL2.5-7B86.5984.3978.6379.1571.3271.2184.7479.8979.6683.4487.0862.5079.05
Reinforcement Fine-Tuning
UI-R1-3B75.8979.4367.3175.4261.3561.3373.4164.1263.9879.1582.4166.4470.85
GUI-R1-3B89.8687.4276.3188.5875.1075.0891.8678.3778.3183.6881.5964.4180.88
GUI-R1-7B90.8588.0680.3191.1677.2977.3592.2083.3683.3385.1784.0266.5283.30
", + "image_path": "fc5cfb0efb77c23d45296aad426bb409af65cd88b79315371f87476014280469.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 579, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 504, + 601 + ], + "type": "text", + "content": "task-specific rewards but also achieves this with fewer training examples, which provides a scalable and efficient alternative to traditional SFT methods." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 607, + 504, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 504, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 504, + 652 + ], + "type": "text", + "content": "High-level task capability. We evaluate the high-level task execution capability of GUI-R1 using AndroidControl-High [27] and GUI-Odyssey [28]. AndroidControl-High evaluates high-level task execution on mobile platforms, while GUI-Odyssey focuses on cross-app navigation scenarios, featuring high-level tasks spanning six applications and 203 apps." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "type": "text", + "content": "As shown in Table 3, due to our unified action space with rule-based reward modeling, GUI-R1 achieves SOTA on high-level GUI tasks. Compared to the closed-source model GPT-4o, our 3B-scale method achieves an absolute improvement of 21.06, demonstrating that RFT, in contrast to SFT, can efficiently and reliably enhance the success rate of GUI agents in real-world tasks. 
Furthermore, compared to UI-R1 [9], which focuses on improving low-level grounding capabilities, our model achieves an average improvement of 3.4 points at the 3B scale, with a particularly notable" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 136, + 110, + 475, + 306 + ], + "blocks": [ + { + "bbox": [ + 104, + 77, + 504, + 109 + ], + "lines": [ + { + "bbox": [ + 104, + 77, + 504, + 109 + ], + "spans": [ + { + "bbox": [ + 104, + 77, + 504, + 109 + ], + "type": "text", + "content": "Table 3: GUI high-level task accuracy on AndroidControl-High and GUI-Odyssey. All experiments are conducted under the same zero-shot prompt for fair comparison. * denotes supervised fine-tuned on GUI-R1-3K. The best results are in bold." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 136, + 110, + 475, + 306 + ], + "lines": [ + { + "bbox": [ + 136, + 110, + 475, + 306 + ], + "spans": [ + { + "bbox": [ + 136, + 110, + 475, + 306 + ], + "type": "table", + "html": "
ModelsAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
Supervised Fine-Tuning
OS-Atlas-4B49.0149.5122.7749.6334.6320.2537.63
OS-Atlas-7B57.4454.9029.8360.4239.7426.9644.88
QwenVL2.5-3B*52.0549.5341.2243.6932.2127.3141.00
QwenVL2.5-7B*69.1558.6948.1156.7838.6534.4450.97
Zero Shot
GPT-4o63.0630.9021.1737.5014.175.3628.69
QwenVL2.5-3B47.8146.5138.9037.4026.4926.6937.30
QwenVL2.5-7B68.6759.7147.0655.6037.7834.3750.53
Reinforcement Fine-Tuning
UI-R1-3B57.8555.7045.4452.1634.4632.4946.35
GUI-R1-3B58.0456.2446.5554.8441.5241.3349.75
GUI-R1-7B71.6365.5651.6765.4943.6438.7956.13
", + "image_path": "d0a5c889d078984ece46a418434aaa9819e88f9e9d29655aaa178ba48ae34a91.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 178, + 316, + 430, + 438 + ], + "blocks": [ + { + "bbox": [ + 178, + 316, + 430, + 438 + ], + "lines": [ + { + "bbox": [ + 178, + 316, + 430, + 438 + ], + "spans": [ + { + "bbox": [ + 178, + 316, + 430, + 438 + ], + "type": "image", + "image_path": "3a9ff69a46c697dceba16ce549cd2020caec5b420655f3f43a3832c8234e5942.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 180, + 439, + 429, + 451 + ], + "lines": [ + { + "bbox": [ + 180, + 439, + 429, + 451 + ], + "spans": [ + { + "bbox": [ + 180, + 439, + 429, + 451 + ], + "type": "text", + "content": "Figure 4: Ablation study of image resolution and data quality." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 472, + 504, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 507 + ], + "type": "inline_equation", + "content": "27.2\\%" + }, + { + "bbox": [ + 104, + 472, + 504, + 507 + ], + "type": "text", + "content": " lead in the step success rate on GUI-Odyssey. This indicates that RL training focused on low-level tasks is insufficient for handling complex high-level instructions. RFT designed for high-level tasks is better suited as a direction for developing GUI agent models." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 514, + 195, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 195, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 195, + 526 + ], + "type": "text", + "content": "4.3 Ablation Study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 534, + 504, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 504, + 644 + ], + "type": "text", + "content": "Image resolution and data quality. To investigate the impact of image resolution and data quality on GUI RFT, we conduct corresponding ablation experiments, with the results shown in Figure 4. As observed, when using the filtered GUI-R1-3K dataset, the model requires only a few updates to achieve relatively high rewards. In contrast, training with unfiltered and low-quality data necessitates significantly more training time for the model to converge, with a noticeably lower performance ceiling. To further explore the effect of image resolution on model training, we increase the image resolution to twice its original size (from 1,048,576 pixels to 2,097,152 pixels). As shown in Figure 4, because of the high resolution of GUI task images and the small size of many UI elements, increasing the image resolution allows the model to perceive these elements more clearly, which accelerates the convergence speed of RFT and improves the performance ceiling." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 649, + 504, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 649, + 504, + 727 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 504, + 727 + ], + "type": "text", + "content": "Coefficients in the reward function. 
To explore the impact of the coefficients for format rewards and accuracy rewards in the reward function on the final performance, we conduct relevant ablation experiments, as shown in Table 4. The results indicate that reducing the coefficient ratio of format rewards leads to consistent performance improvements. This is because format rewards are easier to learn during training and often converge early in the process. By amplifying the accuracy rewards, the advantages of providing correct answers are further emphasized, ultimately leading to more performance improvements." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 176, + 89, + 434, + 152 + ], + "blocks": [ + { + "bbox": [ + 169, + 77, + 441, + 88 + ], + "lines": [ + { + "bbox": [ + 169, + 77, + 441, + 88 + ], + "spans": [ + { + "bbox": [ + 169, + 77, + 441, + 88 + ], + "type": "text", + "content": "Table 4: Ablation study of the coefficient " + }, + { + "bbox": [ + 169, + 77, + 441, + 88 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 169, + 77, + 441, + 88 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 169, + 77, + 441, + 88 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 169, + 77, + 441, + 88 + ], + "type": "text", + "content": " in reward function." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 176, + 89, + 434, + 152 + ], + "lines": [ + { + "bbox": [ + 176, + 89, + 434, + 152 + ], + "spans": [ + { + "bbox": [ + 176, + 89, + 434, + 152 + ], + "type": "table", + "html": "
αβAndroidControl-HighGUI-OdysseyOverall
TypeGRSRTypeGRSR
0.20.858.0456.2446.5554.8441.5241.3349.75
0.50.557.9355.9146.6252.7737.4435.6647.72
0.80.257.8555.7045.4452.1634.4632.4946.48
", + "image_path": "b3b746105ea2ffcd1fb7f8ee095e4365304aba370c8b0e79c09e244405980cdc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 172, + 187, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 172, + 187, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 187, + 183 + ], + "type": "text", + "content": "4.4 Visualization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 192, + 506, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 290 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 506, + 290 + ], + "type": "text", + "content": "In Figure 5, we provide additional visualization of the training process. As shown in Figure 5a and Figure 5b, it can be observed that the format reward converges quickly in the early stages of training, while the accuracy reward becomes the main source of differentiated rewards in the later stages of training. Furthermore, as illustrated in Figure 5d, the mean response length first decreases and then gradually increases, but the \"aha moment\" does not occur. This may be due to the single-image input training method in a non-interactive environment, which prevents the model from autonomously tracing back the sequence of incorrect actions. Exploring multi-image high-level tasks in interactive environments could be a potential direction for inducing the emergence of the \"aha moment\" in future research." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 300, + 302, + 397 + ], + "blocks": [ + { + "bbox": [ + 106, + 300, + 302, + 397 + ], + "lines": [ + { + "bbox": [ + 106, + 300, + 302, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 302, + 397 + ], + "type": "image", + "image_path": "e2de6aad495dae889bb09433213fc9eb2b0d78b38f07f8d686d2e22d3d1a9525.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 300, + 504, + 397 + ], + "blocks": [ + { + "bbox": [ + 309, + 300, + 504, + 397 + ], + "lines": [ + { + "bbox": [ + 309, + 300, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 309, + 300, + 504, + 397 + ], + "type": "image", + "image_path": "17f917d81e4fa2897b0b75f5c235d52a73e95bc59698049327863f8f4c7e5fdd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 106, + 413, + 302, + 510 + ], + "blocks": [ + { + "bbox": [ + 137, + 403, + 271, + 412 + ], + "lines": [ + { + "bbox": [ + 137, + 403, + 271, + 412 + ], + "spans": [ + { + "bbox": [ + 137, + 403, + 271, + 412 + ], + "type": "text", + "content": "(a) Accuracy reward curve with training steps." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 413, + 302, + 510 + ], + "lines": [ + { + "bbox": [ + 106, + 413, + 302, + 510 + ], + "spans": [ + { + "bbox": [ + 106, + 413, + 302, + 510 + ], + "type": "image", + "image_path": "11c1746550af984bf27ac6ab2bfa8bc0572a3d1eff36d53b128703bd9ce95da9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 150, + 515, + 258, + 525 + ], + "lines": [ + { + "bbox": [ + 150, + 515, + 258, + 525 + ], + "spans": [ + { + "bbox": [ + 150, + 515, + 258, + 525 + ], + "type": "text", + "content": "(c) PG loss curve with training steps." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 530, + 504, + 565 + ], + "lines": [ + { + "bbox": [ + 104, + 530, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 504, + 565 + ], + "type": "text", + "content": "Figure 5: Visualization of the training process of GUI-R1. To provide more details, we report the curves of GUI-R1's key metrics during training, including format reward, accuracy reward, mean response length, and policy gradient (PG) loss, as they vary with the training steps." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 413, + 504, + 510 + ], + "blocks": [ + { + "bbox": [ + 343, + 403, + 469, + 412 + ], + "lines": [ + { + "bbox": [ + 343, + 403, + 469, + 412 + ], + "spans": [ + { + "bbox": [ + 343, + 403, + 469, + 412 + ], + "type": "text", + "content": "(b) Format reward curve with training steps." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 413, + 504, + 510 + ], + "lines": [ + { + "bbox": [ + 309, + 413, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 309, + 413, + 504, + 510 + ], + "type": "image", + "image_path": "3ccc48d75f9254f3950936c15024e9b372f78d1f4843180668d22b93c0ebadae.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 332, + 515, + 480, + 525 + ], + "lines": [ + { + "bbox": [ + 332, + 515, + 480, + 525 + ], + "spans": [ + { + "bbox": [ + 332, + 515, + 480, + 525 + ], + "type": "text", + "content": "(d) Mean response length curve with training steps." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 589, + 185, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 589, + 185, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 589, + 185, + 601 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 613, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 504, + 691 + ], + "type": "text", + "content": "This paper presents GUI-R1, which is the first GUI reinforcement learning framework grounded in unified action space rule modeling. By integrating reinforcement fine-tuning with large vision-language models, GUI-R1 enables effective contextual action prediction and verifiable reward-driven learning in GUI environments. Extensive experiments demonstrate that GUI-R1 consistently outperforms baselines on various tasks. Moving forward, we plan to extend GUI-R1 to support collaborative multi-agent interaction and robust error correction policies, enabling the system to handle complex tasks with greater scalability." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 90, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 90, + 505, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 90, + 505, + 124 + ], + "spans": [ + { + "bbox": [ + 111, + 90, + 505, + 124 + ], + "type": "text", + "content": "[1] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 130, + 505, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 130, + 505, + 165 + ], + "spans": [ + { + "bbox": [ + 111, + 130, + 505, + 165 + ], + "type": "text", + "content": "[2] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 171, + 505, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 171, + 505, + 205 + ], + "spans": [ + { + "bbox": [ + 111, + 171, + 505, + 205 + ], + "type": "text", + "content": "[3] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 212, + 505, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 212, + 505, + 245 + ], + "spans": [ + { + "bbox": [ + 111, + 212, + 505, + 245 + ], + "type": "text", + "content": "[4] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 252, + 505, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 252, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 252, + 505, + 285 + ], + "type": "text", + "content": "[5] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 293, + 505, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 293, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 111, + 293, + 505, + 327 + ], + "type": "text", + "content": "[6] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 334, + 505, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 334, + 505, + 367 + ], + "spans": [ + { + "bbox": [ + 111, + 334, + 505, + 367 + ], + "type": "text", + "content": "[7] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 374, + 505, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 374, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 111, + 374, + 505, + 407 + ], + "type": "text", + "content": "[8] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 415, + 505, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 415, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 111, + 415, + 505, + 449 + ], + "type": "text", + "content": "[9] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. arXiv preprint arXiv:2503.21620, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 456, + 505, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 505, + 489 + ], + "type": "text", + "content": "[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 
arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "type": "text", + "content": "[11] Theodore Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas Griffiths. Cognitive architectures for language agents. Transactions on Machine Learning Research, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 526, + 505, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 505, + 560 + ], + "type": "text", + "content": "[12] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6):186345, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 567, + 505, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 505, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 505, + 600 + ], + "type": "text", + "content": "[13] Qiushi Sun, Zhangyue Yin, Xiang Li, Zhiyong Wu, Xipeng Qiu, and Lingpeng Kong. Corex: Pushing the boundaries of complex reasoning through multi-model collaboration. arXiv preprint arXiv:2310.00280, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 607, + 505, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 607, + 505, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 607, + 505, + 641 + ], + "type": "text", + "content": "[14] Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 647, + 505, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 505, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 505, + 681 + ], + "type": "text", + "content": "[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 689, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 505, + 723 + ], + "type": "text", + "content": "[16] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 654 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[17] Jiawei Liu and Lingming Zhang. Code-r1: Reproducing r1 for code with reliable rewards. arXiv preprint arXiv:2503.18470, 2025." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 101, + 505, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 101, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 505, + 146 + ], + "type": "text", + "content": "[18] Zihan Wang*, Kangrui Wang*, Qineng Wang*, Pingyue Zhang*, Linjie Li*, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. Training agents by reinforcing reasoning, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 153, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 153, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 107, + 153, + 504, + 177 + ], + "type": "text", + "content": "[19] Zhenyu Pan and Han Liu. Metaspatial: Reinforcing 3d spatial reasoning in vlms for the meta-verse. arXiv preprint arXiv:2503.18470, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 182, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 182, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 505, + 228 + ], + "type": "text", + "content": "[20] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 235, + 505, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 235, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 106, + 235, + 505, + 269 + ], + "type": "text", + "content": "[21] Guilherme Penedo, Hynek Kydlicek, Anton Lozhkov, Margaret Mitchell, Colin A Raffel, Leandro Von Werra, Thomas Wolf, et al. The fineweb datasets: Decanting the web for the finest text data at scale. 
In NeurIPS, pages 30811-30849, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 274, + 505, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 274, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 106, + 274, + 505, + 310 + ], + "type": "text", + "content": "[22] Chongyang Bai, Xiaoxue Zang, Ying Xu, Srinivas Sunkara, Abhinav Rastogi, Jindong Chen, and Blaise Aguera y Arcas. Uibert: Learning generic multimodal representations for ui understanding, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 316, + 505, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 316, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 505, + 350 + ], + "type": "text", + "content": "[23] Yuxiang Chai, Siyuan Huang, Yazhe Niu, Han Xiao, Liang Liu, Dingyu Zhang, Peng Gao, Shuai Ren, and Hongsheng Li. Amex: Android multi-annotation expo dataset for mobile gui agents. arXiv preprint arXiv:2407.17490, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 357, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 357, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 107, + 357, + 505, + 380 + ], + "type": "text", + "content": "[24] Yang Li, Jiacong He, Xin Zhou, Yuan Zhang, and Jason Baldridge. Mapping natural language instructions to mobile ui action sequences. arXiv preprint arXiv:2005.03776, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 386, + 505, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 386, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 107, + 386, + 505, + 420 + ], + "type": "text", + "content": "[25] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 
Llamafactory: Unified efficient fine-tuning of " + }, + { + "bbox": [ + 107, + 386, + 505, + 420 + ], + "type": "inline_equation", + "content": "100+" + }, + { + "bbox": [ + 107, + 386, + 505, + 420 + ], + "type": "text", + "content": " language models. In ACL, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 427, + 505, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 427, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 107, + 427, + 505, + 451 + ], + "type": "text", + "content": "[26] Yaowei Zheng, Junting Lu, Shenzhi Wang, and Y Xiong. Easyr1: An efficient, scalable, multimodality rl training framework, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 457, + 505, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 457, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 107, + 457, + 505, + 491 + ], + "type": "text", + "content": "[27] Wei Li, William Bishop, Alice Li, Chris Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. On the effects of data scale on computer control agents. arXiv preprint arXiv:2406.03679, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 498, + 505, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 498, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 505, + 533 + ], + "type": "text", + "content": "[28] Quanfeng Lu, Wenqi Shao, Zitao Liu, Fanqing Meng, Boxuan Li, Botong Chen, Siyuan Huang, Kaipeng Zhang, Yu Qiao, and Ping Luo. Gui odyssey: A comprehensive dataset for cross-app gui navigation on mobile devices. arXiv preprint arXiv:2406.08451, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 539, + 505, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 539, + 505, + 573 + ], + "spans": [ + { + "bbox": [ + 107, + 539, + 505, + 573 + ], + "type": "text", + "content": "[29] Kaixin Li, Ziyang Meng, Hongzhan Lin, Ziyang Luo, Yuchen Tian, Jing Ma, Zhiyong Huang, and Tat-Seng Chua. Screenshot-pro: Gui grounding for professional high-resolution computer use. Workshop on Reasoning and Planning for Large Language Models, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 579, + 505, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 579, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 107, + 579, + 505, + 613 + ], + "type": "text", + "content": "[30] Wentong Chen, Junbo Cui, Jinyi Hu, Yujia Qin, Junjie Fang, Yue Zhao, Chongyi Wang, Jun Liu, Guirong Chen, Yupeng Huo, et al. Guicourse: From general vision language models to versatile gui agents. arXiv preprint arXiv:2406.11317, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 620, + 505, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 620, + 505, + 654 + ], + "spans": [ + { + "bbox": [ + 107, + 620, + 505, + 654 + ], + "type": "text", + "content": "[31] Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem Al-Shikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In ECCV, pages 161-178. Springer, 2024." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_content_list.json b/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ba4ec1c636597aab1d9ccbc57c47cd176f4d3e31 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_content_list.json @@ -0,0 +1,2495 @@ +[ + { + "type": "text", + "text": "The Scalability of Simplicity: Empirical Analysis of Vision-Language Learning with a Single Transformer", + "text_level": 1, + "bbox": [ + 116, + 130, + 880, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weixian Lei* Jiacong Wang* Haochen Wang* \nXiangtai Li Jun Hao Liew Jiashi Feng Zilong Huang† \n*Equal contribution, † Project Lead \nBytedance Seed", + "bbox": [ + 264, + 202, + 730, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 308, + 326, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper introduces SAIL, a single transformer unified multimodal large language model (MLLM) that integrates raw pixel encoding and language decoding within a singular architecture. Unlike existing modular MLLMs, which rely on a pre-trained vision transformer (ViT), SAIL eliminates the need for a separate vision encoder, presenting a more minimalist architecture design. 
Instead of introducing novel architectural components, SAIL adapts mix-attention mechanisms and multimodal positional encodings to better align with the distinct characteristics of visual and textual modalities. We systematically compare SAIL's properties—including scalability, cross-modal information flow patterns, and visual representation capabilities—with those of modular MLLMs. By scaling both training data and model size, SAIL achieves performance comparable to modular MLLMs. Notably, the removal of pretrained ViT components enhances SAIL's scalability and results in significantly different cross-modal information flow patterns. Moreover, SAIL demonstrates strong visual representation capabilities, achieving results on par with ViT-22B in vision tasks such as semantic segmentation. Code and models are available1.", + "bbox": [ + 86, + 340, + 485, + 672 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 89, + 700, + 220, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The pursuit of multimodal intelligence has driven the development of Multimodal Large Language Models (MLLMs) [49, 61, 68], which typically adopt a modular design: a pre-trained vision encoder (e.g., CLIPViT [15, 62]) extracts image features, a Large Language Model (LLM) [2, 16, 38, 58-60] processes text, and a lightweight projector aligns the two modalities. This framework achieves strong performance through multi-stage pretraining, supervised fine-tuning (SFT), and post-training on multimodal datasets [3, 18, 49, 76, 77, 91]. While effective,", + "bbox": [ + 89, + 726, + 483, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/87aacb4266bb5df777e2e46d47cbe6631fbba6a4cecc29791d9ff98e59ada6b7.jpg", + "image_caption": [ + "(A)", + "Figure 1. (A) Data scaling curve for Modular Multimodal Large Language Model (MLLM) and SAIL, our Single Transformer-based MLLM. 
As pretraining data increases, the single transformer SAIL shows a sharper performance gain, demonstrating its superior data scalability. (B) Comparison to existing Single Transformer-based MLLMs: our SAIL pushes the performance boundaries on both vision tasks and vision-language tasks." + ], + "image_footnote": [], + "bbox": [ + 514, + 316, + 714, + 458 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/74ee2a817ef3268b95e72eeb8f2fb43f406463207bb7cde5d1a436e7ac6a0bd7.jpg", + "image_caption": [ + "(B)" + ], + "image_footnote": [], + "bbox": [ + 715, + 316, + 903, + 458 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "this modular MLLM paradigm inherently fragments multimodal processing, reinforces reliance on pretrained visual encoders, which may limit deployment flexibility and scalability [11, 21, 54].", + "bbox": [ + 511, + 594, + 905, + 655 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A promising alternative is to eliminate the visual encoder entirely and process raw image patches and text tokens within a single Transformer. This unified architecture removes modality-specific modules, enabling parameter sharing and end-to-end learning of vision-language interactions. Previous works [11, 21, 54] have primarily explored the architecture design, training data, and methods of Single Transformer-based MLLMs. However, little exploration has been given to their fundamental properties, such as scalability, cross-modal information flow patterns, and visual representation capabilities. 
A deeper understanding of these properties is crucial for unlocking the full potential of Single Transformer-based MLLMs.", + "bbox": [ + 509, + 656, + 906, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we present an experimental analysis of the fundamental properties of Single Transformer-based MLLMs and compare them to modular MLLMs (e.g.", + "bbox": [ + 511, + 854, + 908, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "ByteDance | Seed", + "bbox": [ + 89, + 88, + 316, + 109 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10462v1 [cs.CV] 14 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1https://github.com/bytedance/SAIL", + "bbox": [ + 107, + 886, + 377, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LLaVA [49]). Additionally, in the absence of a pre-trained visual encoder, Single Transformers have to learn visual representations from scratch. Thus, an intriguing question arises: can a trained Single Transformer emerge as a strong vision encoder?", + "bbox": [ + 89, + 114, + 480, + 188 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct a series of experiments to train and study our Single trAnformer model for vIsion and Language (SAIL). While we do not propose novel architecture designs, we introduce necessary modifications to enable the model to process different modalities in a unified architecture. In its micro architecture design, we address the different spatial characteristics of 2D images and 1D text data by employing a mixed attention mechanism: bidirectional attention for image patches and causal attention for text tokens, combined with multimodal rotary position embedding. 
Through model and data scaling, SAIL achieves performance on vision-language benchmarks comparable to modular MLLMs, while also functioning as a high-performing vision backbone, as shown in Figure 1.", + "bbox": [ + 89, + 191, + 482, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "More concretely, our empirical analysis uncovers following striking advantages of Single Transformer architectures: (i) Superior Data Scaling: In controlled experiments, SAIL exhibits steeper performance gains as pretraining data scales. While LLaVA-style modular MLLMs initially perform well, our model's performance becomes very close to theirs when pretrained on 512M samples, as shown in Figure 1(A). This suggests that unified architectures can effectively leverage large-scale data and potentially match the performance of modular MLLMs.", + "bbox": [ + 89, + 404, + 482, + 556 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(ii) Vision Centric Information Flow Pattern: Through analysis of attention distributions, we observe that Single Transformers assign significantly higher attention scores to image tokens during token prediction compared to modular MLLMs. This indicates that the information flow in Single Transformer MLLMs is more direct, with visual tokens influencing prediction tokens more prominently, highlighting a vision-centric approach to decision-making.", + "bbox": [ + 89, + 559, + 482, + 679 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(iii) Vision Encoder Functioning: Our experiments further demonstrate that the pretrained Single Transformer inherently serves as a powerful vision encoder. Comprehensive evaluations on vision-centric tasks, such as image classification and semantic segmentation, show that the model learns rich visual representations during multimodal pretraining. 
These representations enhance its capacity for both semantic-level comprehension (e.g., object categorization) and pixel-level understanding (e.g., fine-grained segmentation masks), bridging high-level abstraction and low-level visual reasoning within a unified architecture.", + "bbox": [ + 89, + 681, + 482, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our findings indicate that Single Transformer-based MLLMs hold great promise in surpassing modular MLLMs in terms of leveraging large-scale data, forming direct vision-centric information pathways, and functioning as effective vision encoders. We hope", + "bbox": [ + 89, + 848, + 482, + 924 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "our empirical findings inspire further research to refine and enhance Single Transformer architecture, ultimately driving advancements in multimodal intelligence from a new perspective.", + "bbox": [ + 511, + 114, + 903, + 175 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 511, + 191, + 653, + 208 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Paradigms in Vision-Language Model Design", + "text_level": 1, + "bbox": [ + 511, + 219, + 895, + 234 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modular MLLMs with Visual Encoders. The prevailing approach in MLLM design employs modular architectures [3, 49, 61, 68, 77] that rely on pretrained vision encoders (e.g., CLIP-ViT [15, 62], InternViT [14]) to process visual inputs. The visual features extracted from these frozen encoders are then aligned with LLM input spaces via linear [7, 49, 75, 91] or cross-attention layers [1, 33]. While this module design enables effective transfer of pretrained visual-language knowledge, it also introduces several limitations. 
First, incorporating a separate ViT encoder significantly slows down both training and inference, increasing deployment complexity and requiring costly infrastructure—especially when compared to a single transformer unified model. Second, common strategies for integrating visual features, such as direct mapping into LLM inputs [7, 49, 91] or sharing them across LLM layers [1, 33], often struggle to reconcile the inherent differences between images and text representations. Finally, as model scale, balancing the interactions between the the encoder, LLM, and alignment layers becomes increasingly challenging [11, 54]. Thus, in this work, we explore a single transformer-based MLLM architecture that eliminates the ViT encoder and alignment components to overcome these challenges.", + "bbox": [ + 511, + 243, + 903, + 604 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Single Transformer-based MLLMs Without Visual Encoders. Emerging research explores end-to-end architectures that process raw image patches and text tokens through a single Transformer, bypassing visual encoders entirely. These monolithic designs fall into two categories: continuous tokenization and discrete tokenization. Continuous tokenization, exemplified by Fuyu-8B [5] and SOLO [11], directly maps patches to LLM embeddings via linear projections, enabling flexible resolution handling but requiring massive pretraining data. Discrete tokenization, adopted by Chameleon [67] and Emu3 [79], employs VQ-VAE tokenizers to compress images into discrete tokens, trading pixel-level fidelity for generation capabilities. 
While later efforts such as EVE [21] and MonoInternVL [54] demonstrate the feasibility of encoder-free training, critical gaps remain: (1) Existing methods rely on extra designs and auxiliary loss [21], complicating training pipelines; (2) The scaling laws and fundamental properties of purely end-to-end trained models remain poorly understood; (3) Vision-language interaction in shared parameter spaces lacks systematic analysis—most prior MLLMs", + "bbox": [ + 511, + 606, + 903, + 924 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "default to causal attention for processing image-text sequences. In this work, we reveal that enabling bidirectional attention between image patches significantly enhances visual representation learning, addressing a key limitation in current designs. More importantly, our study bridges these gaps by establishing foundational principles for training scalable, self-contained single-transformer MLLMs.", + "bbox": [ + 89, + 114, + 480, + 219 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Vision Representation Learning", + "text_level": 1, + "bbox": [ + 89, + 231, + 372, + 247 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Learning effective vision representations is a core challenge in computer vision research, with extensive works [4, 19, 20, 26, 31, 80] dedicated to this problem. With the proliferation of large-scale web-sourced image-text datasets [6, 27, 63], recent methods leverage this data to train deep visual representations via three primary paradigms:", + "bbox": [ + 89, + 253, + 480, + 344 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Text as Classification Labels. Early methods used textual descriptions as weak supervision by extracting categorical labels from captions. 
For example, frameworks like Tag2Text [34] and RAM [89] used ViTs [23] to predict noun-based pseudo-labels from datasets like CC12M [6]. CatLIP [57] scaled labels to millions using object-centric supervision, and SuperClass [35] directly used tokenized text tokens as classification categories.", + "bbox": [ + 89, + 345, + 482, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image-Text Contrastive Learning Contrastive pretraining, as exemplified by CLIP [15, 62] and ALIGN [37], aligns global image-text embeddings within a shared latent space. Subsequent works [8, 14, 42, 45, 78, 87] focused on enhancing CLIP's performance and improving training efficiency.", + "bbox": [ + 89, + 465, + 482, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Text as Autoregressive Targets. Caption generation as a pretext task is another approach for visual representation learning. SimVLM [80] trains encoder-decoder architectures to autoregressively predict captions, while CapPa [71] trains vision encoders through sequence prediction. These methods often retain modular designs or auxiliary components like contrastive losses [83]. Our work aligns with this category but removes architectural fragmentation by jointly modeling image patches and text tokens in a single Transformer. We find that the pre-trained Single Transformer learns transferable vision representations, enabling it to handle downstream multimodal understanding tasks and function as a vision encoder without modifications.", + "bbox": [ + 89, + 556, + 482, + 752 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. SAIL: Training a Single Transformer for Vision and Language", + "text_level": 1, + "bbox": [ + 89, + 767, + 482, + 803 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Model Architecture", + "text_level": 1, + "bbox": [ + 89, + 810, + 277, + 824 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "SAIL is built upon a unified Transformer architecture (Figure 2(A)) that processes multimodal inputs through streamlined, modality-specific preprocessing. For text, raw input is tokenized using the language model's tokenizer and then transformed into embeddings via the textual embedding module. For images, we partition the input into fixed-", + "bbox": [ + 89, + 833, + 482, + 924 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "size patches and project them into continuous embeddings via a linear projection. Additionally, we maintain a list of special tokens explicitly designed for visual modality encoding: and tokens mark the beginning and end of an image patch span, respectively. In multimodal scenarios, such as image-text pairs, these embeddings are concatenated into a single sequence and fed into the Transformer, enabling joint cross-modal interactions through unified self-attention layers. This design eliminates the need for modality-specific encoders, which efficiently processess heterogeneous data within a single transformer framework.", + "bbox": [ + 511, + 114, + 903, + 295 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Bidirectional attention within image patches. While existing multimodal large language models (MLLMs) [41, 48, 49, 91] predominantly adopt causal attention for autoregressive sequence modeling, our experiments reveal that enabling full bidirectional attention among tokens from the same image significantly enhances visual representation learning and boosts downstream vision-language task performance. 
Note that previous Single Transformer works [11, 21, 54, 79] have only utilized causal attention, without exploring the potential of mixed attention mechanisms.", + "bbox": [ + 511, + 314, + 903, + 479 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As illustrated in Figure 2(B), for SAIL we implement a mixed attention scheme: (1) For text tokens, we preserve causal attention to maintain autoregressive generation capabilities, allowing each token to attend only to its predecessors. (2) For image tokens, we activate full bidirectional attention within each image patch group, empowering every visual token to interact with all others in the same image. This design captures holistic spatial relationships and contextual dependencies among visual elements, addressing the under-explored potential of attention mechanisms in cross-modal alignment. The improved interaction paradigm not only refines vision-language feature fusion but also provides stronger inductive biases for complex reasoning tasks.", + "bbox": [ + 511, + 498, + 903, + 695 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multimodal Rotary Position Embeddings. Following [77], we implement Multimodal RoPE (M-RoPE) in SAIL to harmonize positional modeling for multimodal inputs. The method decomposes positional encoding into two axes: height, and width. For text tokens, all axes share uniform position IDs (aligned with 1D-RoPE), whereas for images, height/width IDs adaptively map to token coordinates, as is shown in Fig 2(C). Notably, position indexing is sequentially initialized across modalities (e.g., starting from images before extending to subsequent text), preserving inter-modal consistency. 
M-RoPE not only improves positional sensitivity but also constrains absolute position magnitudes for visual tokens, facilitating robust generalization to extended sequences in inference.", + "bbox": [ + 511, + 712, + 903, + 924 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 946, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f93ce8b5d07f651bb28ff57f3b09ece10f95d308a8933e9753d6ce14a5c1df50.jpg", + "image_caption": [ + "Figure 2. Model architecture and micro-designs for SAIL. (A) Model Architecture: SAIL is a unified transformer that processes both images and texts without extra module designs. (B) Mixed Attention Mechanism: we adopt bidirectional attention for image patches from the same image and causal attention for text tokens. Examples for a multimodal sequence and a text sequence are provided. Colored squares represent \"allow to attend\" and white squares indicate \"prevent from attending\". (C) Multimodal RoPE: an illustration of the multimodal rotary position embedding for SAIL, with examples for a multimodal sequence and a text sequence." + ], + "image_footnote": [], + "bbox": [ + 125, + 117, + 504, + 388 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/02d4e6e0e828472c97f4452200369a3fa7e8e4ab06ccd32a8e320982bf2f5130.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 112, + 867, + 388 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Pretraining", + "text_level": 1, + "bbox": [ + 89, + 486, + 217, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We apply a two-stage curriculum to progressively strengthen the visual perception of SAIL while preserving its inherent language capabilities.", + "bbox": [ + 89, + 510, + 482, + 556 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stage 1: Accelerated Visual Knowledge Acquisition. 
In this stage, we pretrain SAIL on large-scale image-text pairs to rapidly bootstrap its visual understanding. To maximize data throughput, we uniformly resize all images to a lower resolution (e.g., $224 \\times 224$ ), reducing multimodal sequence lengths and enabling the model to process more samples within fixed training time. To prevent catastrophic forgetting of linguistic knowledge, we interleave pure text corpora with multimodal data during training. This hybrid approach ensures efficient exposure to visual patterns while maintaining robust language proficiency.", + "bbox": [ + 89, + 556, + 482, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stage 2: Enhancing Any-Resolution Image Understanding. Real-world applications require robustness to images of varying resolutions and aspect ratios, such as documents, charts, or infographics. Following prior works [11, 21], we extend pretraining with an any-resolution strategy: images retain their native resolutions during processing, and positional embeddings adapt dynamically to arbitrary spatial dimensions. This stage further refines SAIL's ability to model fine-grained visual details (e.g., tabular structures, text-rich graphics) while continuing to incorporate text-only data for language capability preservation.", + "bbox": [ + 89, + 726, + 482, + 892 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pretraining Objective. Throughout both stages, we optimize the standard language modeling loss only on text to-", + "bbox": [ + 89, + 893, + 482, + 922 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "kens. Image patches and special visual tokens are excluded from loss computation.", + "bbox": [ + 511, + 487, + 906, + 517 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 
Supervised Fine-tuning", + "text_level": 1, + "bbox": [ + 513, + 561, + 728, + 578 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During the Supervised Fine-tuning (SFT) stage, we train SAIL on publicly available, multi-source instruction datasets to enhance its understanding of complex linguistic instructions and diverse dialogue patterns critical for real-world deployment. This phase fine-tunes the entire network architecture, focusing on aligning the model's responses with human intent through exposure to varied instructional formats and multimodal interactions.", + "bbox": [ + 511, + 595, + 906, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1 shows the details of training datasets for pretraining and supervised fine-tuning (SFT) across all stages. During Stage 1 pretraining, we utilized mixed multimodal and pure text datasets including Recap-DataComp-1B [43] and SlimPajama [66], with images at a resolution of $224\\mathrm{x}224$ totaling 512M image-text pairs. In Stage 2, the pretraining datasets include Capfusion [84], self-curated OCR data from LAION COCO [63], InfinityMM Stage 2 subset [29], and SlimPajama, utilizing the any resolution (AnyRes) strategy, with a combined total of 86M image-text pairs along with text data. The SFT stage employed the InfinityMM Stage 3 subset, processed at any resolution, containing 6M image-text pairs.", + "bbox": [ + 511, + 727, + 908, + 924 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 958 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/b962b4ccb763cd14712f5e425147173100dfb6ec6faff3b2f4df0162cf8345b5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
StageDatasetImg.ResNum
Pretraining S1Recap-DataComp-1B [43]224x224512M
SlimPajama [66]-
Pretraining S2Capfusion [84]AnyRes60M
OCR from LAION COCO [63]7M
InfinityMM Stage 2 subset [29]19M
SlimPajama [66]-
SFTInfinityMM Stage 3 [29]AnyRes6M
", + "bbox": [ + 94, + 111, + 478, + 233 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Details of training datasets used across all stages. \"Img.Res\" refers to the image resolution settings applied during each training stage. All datasets listed are publicly available. Note that these settings represent the default configuration for standard SAIL training, while separate settings are used for scaling experiments and ablation studies.", + "bbox": [ + 89, + 238, + 482, + 323 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 89, + 330, + 215, + 348 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 89, + 356, + 297, + 373 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation Benchmarks. For evaluation of vision and language tasks, we evaluate SAIL and existing MLLMs on a broad range of multimodal benchmarks. Specifically, MLLM benchmarks encompass MMBench-EN [50], SEEDBench-IMG [40], MMVet [85], MME [46], HallusionBench [30], MathVistaMINI [53], and OCR-Bench [51]. Visual question answering benchmarks include TextVQA [65], ScienceQA-IMG [52], AI2D [39], MMStar [9], RealWorldQA [81].", + "bbox": [ + 89, + 378, + 482, + 515 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For evaluation of vision representation learning, we conduct experiments on ImageNet-1K [20] for image classification, ADE20K for semantic segmentation [90], and ARO [86] for attribute, relation, and ordering.", + "bbox": [ + 89, + 515, + 482, + 575 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. For pretraining, we initialize SAIL from the Mistral-7B-v0.1 base LLM and set the patch size to 14. We modify Megatron [64] to support SAIL's multimodal input. Pretraining uses 128 NVIDIA A100 80G GPUs with 2-way tensor parallelism and 64-way data parallelism. 
The learning rate is set at 5e-5 and decays cosinely to a minimum of 5e-6. For training efficiency, we concatenate sequences from different data samples into one long sequence of 32,768 tokens, adjusting the attention mask to ensure that tokens from different samples do not attend to each other. We use a round-robin approach to interleave image-text packed sequences and pure text packed sequences, configuring the global batch to contain approximately 16K image-text pairs.", + "bbox": [ + 89, + 575, + 482, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For Supervised Fine-Tuning (SFT), the global batch size is set to 512. Training is performed for one epoch with a maximum learning rate of 1e-5, following a linear warm-up phase and then transitioning to a cosine decay schedule.", + "bbox": [ + 89, + 787, + 482, + 848 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For vision, we load the checkpoint after Stage 1 pretraining and keep it frozen for downstream evaluations, including (1) image classification on ImageNet-1K [20], (2) semantic segmentation on ADE20K [90], and (3) attribute, relation and, ordering on the ARO benchmark [86]. Specif", + "bbox": [ + 89, + 848, + 482, + 925 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ically, (1) for image classification, we utilize an attention-based classifier [25] with 90 epochs of linear probing, where detailed configurations are mostly obtained from common practices [32, 73, 74]. Images are resized to $224 \\times 224$ and the global batch size is 8,192 across 8 A100 (80G) GPUs. (2) For semantic segmentation, we adopt ViT-Adapter [12] with UperNet [82] as the segmentation decoder. The implementation is based on MMSegmentation [17] with 80k training iterations. The input resolution is $512 \\times 512$ and the global batch size is 16 across 8 A100 (80G) GPUs. 
(3) For attribute, relation and, ordering, we regard the negative of the caption loss over each image-text pair as the similarity metric for retrieval.", + "bbox": [ + 511, + 114, + 906, + 311 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Experimental Results", + "text_level": 1, + "bbox": [ + 511, + 321, + 714, + 338 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2.1. Results on Vision Language Tasks", + "text_level": 1, + "bbox": [ + 511, + 345, + 795, + 359 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Table 2, we compare SAIL against existing MLLMs across 13 vision-language benchmarks. SAIL consistently outperforms other Single Transformer-based models like Fuyu [5], EVE [21], SOLO [11], MonoInternVL [54], and EVE2 [22] across diverse vision-language tasks. This demonstrates that SAIL can achieve significant performance gains and push the boundaries of Single Transformer-based MLLMs without needing extra component designs or auxiliary training losses. Moreover, when compared to methods employing discrete vision tokens (e.g., Chameleon and Emu3), SAIL demonstrates superior performance. These results validate that scaling up single-transformer pretraining effectively enhances cross-modal alignment between images and text. Compared to the state-of-the-art modular MLLM LLaVA-OneVision [41], SAIL achieves comparable performance on some benchmarks, such as MMStar, SEEDBench-IMG, and RealWorldQA. While the performance of Single Transformer-based MLLMs still lags behind modular MLLMs in certain areas, we hypothesize that scaling the pretraining data volume or incorporating higher-quality instruction-tuning data will bridge the remaining performance gap.", + "bbox": [ + 511, + 364, + 906, + 698 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2.2. 
Results on Vision Representation Learning", + "text_level": 1, + "bbox": [ + 511, + 707, + 857, + 722 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we compare the quality of learned visual representations of our SAIL with other Single Transformer-based alternatives, including EVE [21], EVE2 [22], and SOLO [11].", + "bbox": [ + 511, + 726, + 906, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Classification and Segmentation. As demonstrated in Table 3, our method, SAIL, achieves a Top-1 accuracy of $84.95\\%$ and a Top-5 accuracy of $97.59\\%$ on the validation set of ImageNet-1K [20], significantly outperforming state-of-the-art alternatives [11, 21, 22]. In the segmentation task, SAIL also demonstrates superior performance with an mIoU of $55.30\\%$ , an mAcc of $67.24\\%$ , and an aAcc of $84.87\\%$ , illustrated in Table 3. These results indicate that SAIL is effective in both classification and segmentation tasks, of", + "bbox": [ + 511, + 787, + 908, + 924 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/0cb729d503de7c017da687d7d77c3458500e81ea0c18677e9175ecc68a34137c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method#Param#Data#VtokenGeneral VQAHallucinationMath&knowledgeOCR VQA
MMS*MMBenSEEDIMMVMMERWQAPOPEHalluSQA1MathVTQAAI2DOCRB
Modular MLLMs:
InternVL-1.5 [13]2.2B-/-332846.770.969.839.3190257.988.337.384.941.370.569.8654
QwenVL-Chat [3]7B7.2B / 50M25634.560.658.2-184849.3-36.868.235.361.545.9488
LLaVA-1.5 [47]7B0.4B+ / 665K57633.164.364.330.5185954.885.927.666.825.546.154.8318
LLaVA-1.6 [48]7B0.4B+ / 760K288037.667.464.743.9184257.886.427.670.232.564.966.6532
Cambrian-1 [69]8B10B+ / 7M57650.775.974.7--64.2-30.680.448.171.773.0-
LLaVA-OneVision [41]7B10B+ / 3.2M729060.981.774.858.8199865.5--96.656.1-81.6-
Single Transformer-based MLLMs:
Fuyu [5]8B-/--34.410.759.321.4-43.78429.856.830.2-46.8366
Chameleon [67]7B1.4B+ / 1.8M102431.131.130.68.31703919.417.147.222.54.846.07.0
EVE [21]7B33M / 1.8M2304-52.364.625.71628-85.0-64.9-56.861.0398
SOLO [11]8B43.7M / 2M102435.867.764.430.4126044.778.640.473.332.925.061.4126
Mono-InternVL [54]3B1.3B / 7M6400-65.567.440.11875--45.793.645.772.668.6767
Emu3 [79]8B-/-16K46.658.568.237.2-57.485.231.789.231.364.770.0687
EVE2 [22]7B92M / 7.3M2500-66.371.445.0170962.487.6-96.2-71.174.8702
SAIL7B600M / 6M360053.170.172.946.3171963.985.854.293.357.077.176.7783
", + "bbox": [ + 94, + 111, + 903, + 343 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b1c4ad969f48db6a7414b4fd603874024b5c5b9bced4b811a8e22034583fe0c3.jpg", + "table_caption": [ + "Table 2. Comparison with existing vision-language models on various vision-language benchmarks, including MMS*: MMStar [9]; MMBen:MMBench-EN [50]; SEED:SEEDBench-Img [40]; MMV:MMVet [85]; MME [46]; POPE [44]; Hallu: HallusionBench [30]; SQA: ScienceQA-Img [52]; TVQA: TextVQA [65]; MathV: MathVistaMINI [53]; AI2D [39]; RWQA: RealWorldQA [81]; OCRB:OCR-Bench [51]. Note that #A-Param denotes the number of activated parameters; #Data represents the pre-training / fine-tuning data volume; #Vtoken indicates the maximum image patch tokens. For MME, we report the sum of perception and cognition scores. The top two results are highlighted in bold and underline, respectively. All results are derived from those reported in other papers and the official reproduction results from the OpenCompass leaderboard [24]. Our results are obtained by VLMEvalKit [24]." + ], + "table_footnote": [], + "table_body": "
MethodClassificationSegmentation
Top-1Top-5mIoUmAccaAcc
EVE [21]42.0365.7727.1235.8972.91
EVE2 [22]44.8669.4140.8553.5379.31
SOLO [11]59.1080.8935.1144.8176.02
SAIL84.9597.5955.3067.2484.87
", + "bbox": [ + 91, + 454, + 480, + 559 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/35b227d6c52d257cd44604f0355dc61261e44c4898ffbe3227dfe22da29d8176.jpg", + "table_caption": [ + "Table 3. Comparison on image classification and semantic segmentation with other encoder-free approaches. Our SAIL outperforms other alternatives by a large margin." + ], + "table_footnote": [], + "table_body": "
Method#Data#ParamImageNet-1KADE20K
OpenCLIP-H [15]2B0.6B84.4-
OpenCLIP-G [15]2B1.8B86.239.3†
ViT-22B [19]3B22B89.555.3
InternViT [14]6B6B88.258.7
SAIL0.5B7B85.055.3
", + "bbox": [ + 94, + 625, + 473, + 724 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "fering substantial improvements over existing methods. In Table 4, even when comparing with other state-of-the-art vision backbones, our SAIL manages to achieve remarkable competitive performance with significantly less training data, demonstrating the scaling property of SAIL.", + "bbox": [ + 89, + 816, + 482, + 892 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Attribute, Relation, and Ordering. To systematically evaluate the ability of SAIL to understand different types of", + "bbox": [ + 89, + 893, + 483, + 924 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/78c033c10d95a55f2ce039c403daf56a8cb6a605be2c6ec2a69fb139cd0433f2.jpg", + "table_caption": [ + "Table 4. Comparison on image classification and semantic segmentation with other vision backbones. $\\dagger$ indicates training with head tuning using UperNet [82], while others are based on ViT-Adapter [12]. SAIL, with significantly less training data, achieves competitive performance." + ], + "table_footnote": [], + "table_body": "
MethodRelationAttributeOrder
COCOFlickr30K
OpenCLIP-H [15]49.964.632.640.4
OpenCLIP-G [15]49.965.633.038.3
CLIP-B/32 [62]59.262.948.157.9
CLIP-L/14 [62]61.261.746.856.8
InternViT [14]59.666.073.476.3
NegCLIP [86]81.071.086.091.0
CapPa [72]86.785.798.899.2
SAIL100.099.5100.0100.0
", + "bbox": [ + 516, + 454, + 903, + 614 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 5. Comparison on attribute, relation, and ordering (ARO) with other vision backbones. SAIL almost encodes compositional relationships between objects and attributes perfectly.", + "bbox": [ + 511, + 621, + 906, + 664 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "relationships, attributes, and order information, we conduct experiments on the ARO benchmark [86]. As demonstrated in Table 5, SAIL encodes compositional relationships between objects and attributes almost perfectly, significantly surpassing other state-of-the-art vision backbones.", + "bbox": [ + 511, + 676, + 903, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For additional vision-related tasks, please refer to PixelSAIL [88] for SAIL's downstream capabilities in pixel-grounded understanding.", + "bbox": [ + 511, + 753, + 903, + 799 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Properties of Single Transformer", + "text_level": 1, + "bbox": [ + 511, + 806, + 802, + 823 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.1. Scaling Properties.", + "text_level": 1, + "bbox": [ + 511, + 829, + 691, + 844 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Model Scaling: We selected models of different sizes: SAIL-0.5B, SAIL-3B, and SAIL-7B (SAIL by default) for our experiments. Each model underwent Stage 1 pretraining on a mixed multimodal and pure text dataset, encountering 512M image-text pairs. Subsequently, they were fine", + "bbox": [ + 511, + 848, + 903, + 924 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e8f37e8f122c2c39ea9d4d546ac139f8ce9719c6c97ce52462f5ab50d470db13.jpg", + "image_caption": [ + "Figure 3. Model scaling of SAIL. 
Left: As the model size increases, the training language modeling loss gradually decreases. Right: As the model size increases, performance on downstream VLM tasks progressively improves." + ], + "image_footnote": [], + "bbox": [ + 93, + 111, + 287, + 236 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/08e984e5b7eac62672319f52aa00b0a3fb3f0b7e9bc754dbd7bad2e6f2348257.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 111, + 480, + 236 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tuned on the LLaVA-mix-665K dataset using the any resolution (anyres) strategy. We evaluated the models based on their performance on vision and language benchmarks after supervised fine-tuning.", + "bbox": [ + 89, + 328, + 482, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The normalized performance of SAIL against model size is plotted in Figure 3. As the model size scales up, we observe a corresponding enhancement in performance. Additionally, as shown on the left side of Figure 3, the training language modeling loss decreases with increasing model size. This reduction in training loss indicates that larger models have a greater capacity to learn multimodal alignments effectively, enabling them to capture complex relationships between vision and language more accurately. The improved learning capacity directly translates to better performance on downstream VLM tasks, showcasing the benefits of scaling up the Single Transformer architecture.", + "bbox": [ + 89, + 391, + 482, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Data Scaling: we compared SAIL with its modular MLLM counterpart. For the modular MLLM, we used SigLIPSO [87] as the vision encoder, and the language model shared the same architecture and initialization parameters as SAIL. 
Both models were pre-trained using Pretraining stage-1 setting, with SAIL encountering 32M, 128M, and 512M image-text pairs during training, followed by fine-tuning on the LLaVA-mix-665K dataset. All parameters of both models are trainable. Both models employ an identical number of input tokens for images and text. The normalized performance of both models is plotted in Figure 1(A). The results show that in the low-data regime (32M), SAIL's performance lags behind the modular MLLM, likely due to SigLIP's prior training on 40B samples. However, as the data scales, SAIL exhibits a steeper performance curve, indicating more promising data scaling properties. At 512M image-text pairs, SAIL achieves performance comparable to the modular MLLM in our evaluation subset. This demonstrates the single transformer's superior data scalability, even without a pretrained vision encoder.", + "bbox": [ + 89, + 574, + 482, + 876 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative results on evaluated benchmark tasks of model scaling and data scaling are tabulated in the appendix.", + "bbox": [ + 89, + 878, + 483, + 925 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/23f836472eb6724001df347ec8381d670f86dae4957d99f0de1396cfaa1bc7f7.jpg", + "image_caption": [ + "Figure 4. Image Attention Score Allocation: The figure shows the proportion of image attention scores across different transformer layers for Single Transformer-based MLLM and modular MLLM when predicting tokens. Single Transformer-based MLLM generally allocates higher attention weights to image tokens compared to modular MLLM." + ], + "image_footnote": [], + "bbox": [ + 516, + 112, + 901, + 311 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.2. 
Information Flow Pattern", + "text_level": 1, + "bbox": [ + 511, + 431, + 738, + 445 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Different attention pattern compared to modular MLLM: since our comparative experiments show that the Single Transformer model exhibits more promising data scaling properties, we conducted an analysis of the trained SAIL model and its modular MLLM counterpart. Specifically, we followed the methodology from FastV [10] to analyze the attention score distribution for each predicted token given an image and a user query. This analysis focuses on how much attention is allocated to image tokens during token prediction. We selected 1000 samples from various datasets including VQAv2, GQA, TextVQA, DocVQA, MME, SEEDBench-IMG, MMBench, and some self-curated dialog examples. For each model prediction, we computed the average attention scores assigned to previous image tokens by the output token.", + "bbox": [ + 511, + 452, + 906, + 678 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conducted a comparative experiment between Single Transformer-based MLLMs and modular MLLMs. The Single Transformer-based MLLMs included SAIL, SOLO [11], and EVE [21], while the modular MLLMs included Qwen2-VL [77], LLaVA-OneVision [41], and LLaVA1.5 [47].", + "bbox": [ + 511, + 680, + 905, + 771 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The results are depicted in Figure 4. Single Transformer-based MLLMs allocate between $60\\%$ and $80\\%$ of attention scores to image tokens across all layers when predicting tokens. In contrast, modular MLLMs such as Qwen2-VL and LLaVA-OneVision allocate only $10\\%$ to $30\\%$ of attention scores to image tokens across different layers. 
For LLaVA1.5, which does not update the ViT parameters during supervised fine-tuning (SFT), the image attention score is relatively high in the first two transformer layers but declines sharply in subsequent layers.", + "bbox": [ + 511, + 773, + 905, + 924 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 946, + 504, + 958 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/94d095fde53983aad3eb8a4956fd06eba46ff9fbee4031253bb8f8ccb00cc867.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodMMBench [50]MME [46]
Physical RelationCelebrity RelationPositionPostersCelebrity
Modular MLLM30.450.598.3134.0100.3
SAIL52.288.9160.0108.275.0
", + "bbox": [ + 94, + 111, + 480, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "From this experiment, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism.", + "bbox": [ + 89, + 303, + 482, + 396 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "These findings indicate that the Single Transformer model places more emphasis on grounding its predictions in the visual information. As the model undergoes data scaling, it allocates more effective computation to image tokens, thereby enhancing its capability as a vision-centric model.", + "bbox": [ + 89, + 398, + 483, + 474 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In summary, the attention pattern analysis underscores the Single Transformer's ability to robustly integrate visual context, enabling it to scale efficiently and potentially outperform modular MLLMs in vision-language tasks.", + "bbox": [ + 89, + 479, + 483, + 540 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3.3. Task-Specific Performance Analysis", + "text_level": 1, + "bbox": [ + 89, + 559, + 385, + 574 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We dissect SAIL's strengths and limitations through targeted case studies:", + "bbox": [ + 89, + 583, + 482, + 613 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Strengths: Spatial Reasoning. SAIL excels at tasks requiring precise spatial location. As shown in Table 6, under the setting of our data scaling experiment, it outperforms the modular counterpart by 61.7 points on the MME Position split and $21.8\\%$ on MMBench Physical Relation questions. 
The unified architecture likely enables tighter coupling between visual geometry and linguistic descriptions.", + "bbox": [ + 89, + 617, + 482, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Weaknesses: World Knowledge. Conversely, SAIL falls short in tasks that demand extensive world knowledge. As shown in Table 6 SAIL underperforms in the MME celebrity and art splits compared to the modular MLLM. This underperformance can be attributed to SAIL's lack of diverse domain-specific data during pretraining, a gap that was not sufficiently addressed during supervised fine-tuning. Modular MLLMs, with their pretrained vision encoders like CLIP [15, 62] or SigLIP [87], have a broader knowledge base and therefore handle such tasks more effectively. We hypothesize that scaling up SAIL's pretraining data diversity could help bridge this gap, enhancing its performance on knowledge-intensive tasks.", + "bbox": [ + 89, + 727, + 482, + 924 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3bb96e8beba6754511e44dc43585c34466a7f1e614f055714a6d3c6d31caaa15.jpg", + "table_caption": [ + "Table 6. Performance Comparison of SAIL and Modular MLLM in MMBench and MME Tasks: the strengths of SAIL in spatial reasoning tasks (MMBench Physical Relation and MME Position split) and its weaknesses in world knowledge tasks (MMBench Celebrity Relation and MME Celebrity and Posters splits)." + ], + "table_footnote": [], + "table_body": "
Exp. SettingVQAv2GQASQATQASEED-I
Default59.146.959.620.135.1
#1 No Img full attn57.845.258.716.233.8
#2 No pure text in PT56.342.148.618.332.4
", + "bbox": [ + 521, + 112, + 903, + 178 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 7. Ablation Study on Basic Factors for SAIL: This table presents the impact of different ablation settings on the performance of SAIL across VQAv2 [28], GQA [36], SQA [52], TQA [65], and SEED-I [40]. The default setting includes image full attention and the inclusion of pure text data in pretraining. Ablation #1 removes image full attention, and ablation #2 excludes pure text in pretraining.", + "bbox": [ + 511, + 181, + 906, + 282 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Empirical Observations on Basic Factors", + "text_level": 1, + "bbox": [ + 511, + 304, + 864, + 321 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To guide scalable training of single-transformer MLLMs, we conduct ablation studies on two critical design choices using SAIL-0.5B pretrained on 128M image-text pairs and fine-tuned on LLaVA-mix-665K. Performance is evaluated through zero-shot image classification after pretraining [71] and vision-language benchmarks after SFT.", + "bbox": [ + 511, + 328, + 906, + 419 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Bidirectional Attention for Image Patches with Multimodal Position Encoding. We compare two approaches for integrating image patches into the transformer: (1) Causal attention with 1D positional encoding, using a token to demarcate image rows. (2) Full bidirectional attention for image patches paired with multimodal rotary position embeddings (RoPE), which jointly encode spatial coordinates (e.g., 2D grid positions) and text token positions. As shown in Table 7, configuration of using bidirectional attention with multimodal RoPE significantly improves performance on vision-language tasks, with a particularly notable gain of $3.1\\%$ on TextVQA. 
This suggests that enabling cross-patch interactions during pretraining enhances visual representation learning and tightens cross-modal alignment.", + "bbox": [ + 511, + 421, + 908, + 648 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Interleaving Pure Text Data During Pretraining. We analyze the impact of mixing SlimPajama text data with image-text pairs during pretraining. The results, as presented in Table 7 #2, reveal that mixing in pure text data consistently improves performance across vision and language benchmarks. This finding underscores the importance of preserving language capabilities in the LLM when training Single Transformer models, as maintaining strong language skills is crucial for building a multimodal model capable of complex reasoning. Currently, incorporating text data in training is one of the effective methods to maintain the language abilities of the model.", + "bbox": [ + 511, + 648, + 908, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In conclusion, our ablation studies identify key design choices for training SAIL effectively. Using bi-directional attention with multimodal rotary position embeddings enhances visual perception, while incorporating pure text data preserves essential language capabilities for robust multimodal performance.", + "bbox": [ + 511, + 832, + 908, + 924 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 89, + 112, + 209, + 128 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we conducted an extensive analysis of Single Transformer-based MLLMs compared to modular MLLMs. Our investigation explored the unique properties of Single Transformers, including scalability, cross-modal information flow patterns, and visual representation capabilities. 
A series of experiments on our trained SAIL model demonstrated that this unified architecture achieves performance on vision-language benchmarks comparable to modular MLLMs while also functioning effectively as a vision backbone. Our findings highlight several advantages of Single Transformer architectures, such as superior data scalability, vision-centric information flow, and inherent capabilities as a powerful vision encoder. We hope our empirical findings will inspire further research to refine and enhance Single Transformer architectures, advancing the field of multimodal intelligence.", + "bbox": [ + 89, + 138, + 485, + 380 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 89, + 395, + 179, + 412 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In the appendix, we provide additional experimental details and results.", + "bbox": [ + 89, + 420, + 482, + 450 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Additional Experimental Details", + "text_level": 1, + "bbox": [ + 89, + 460, + 344, + 477 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Training Configurations. In this section, we provide the corresponding setups for our experiment series in the main paper, including the default setting, the data scaling series, the model scaling series, and ablation experiment settings. The detailed configurations are shown in Table 8.", + "bbox": [ + 89, + 483, + 482, + 558 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evaluation Configurations. In the main paper, we measure the model performance on several benchmarks: VQAv2 [28], GQA [36], ScienceQA-IMG [52], TextVQA [65], POPE [44], MME [46], MMBench [50], and SEEDBench-IMG [40]. We normalized the performance to a full score of 100 and averaged the performance across these benchmarks to plot the curves shown in Figure 1(A) and Figure 3. 
The detailed experimental results are shown in Table 9.", + "bbox": [ + 89, + 559, + 482, + 694 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Additional Experimental Results", + "text_level": 1, + "bbox": [ + 89, + 705, + 346, + 720 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "A comparison of SAIL and LLaVA1.5. In this section, we conduct an experiment to compare SAIL with LLaVA1.5 [47]. In this experiment, our SAIL is trained on 512M image-text pairs in Pretraining Stage 1, followed by fine-tuning on the LLaVA-mix-665K dataset. To fairly compare the performance of the two models, we do not use the anyres strategy during SFT. Instead, we adopt the same image processing approach as LLaVA1.5, ensuring that the aspect ratio and number of image tokens are consistent across both models.", + "bbox": [ + 89, + 727, + 482, + 876 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The experimental results are presented in Table 10. Despite our model being trained on only 512M image-text pairs, which is significantly smaller than the CLIP pretrain-", + "bbox": [ + 89, + 878, + 483, + 925 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ing data used in the LLaVA1.5 model, the results show that our model achieves comparable performance to LLaVA1.5 across various benchmarks. Remarkably, our model even outperforms LLaVA1.5 on specific benchmarks such as DocVQA and ChartQA.", + "bbox": [ + 511, + 114, + 903, + 189 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "These findings highlight the strong potential of Single Transformer models in terms of data scaling. 
Specifically, they suggest that even with a relatively smaller pretraining dataset, Single Transformer models can perform on par with, or even exceed, more extensively trained modular MLLMs like LLaVA1.5 when similar preprocessing and controlled variables are applied.", + "bbox": [ + 511, + 191, + 906, + 297 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Compare SAIL and LLaVA on MMVP. We compare SAIL and LLaVA1.5 [47] on MMVP [70] to dissect the behavior of the two models. The results are shown in Figure 5. From examples (A) and (B), we observe that SAIL performs better in perceiving minor regions and objects. Examples (C) and (D) illustrate that SAIL can more accurately distinguish the states of objects.", + "bbox": [ + 511, + 297, + 906, + 404 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Additional Experiments on Information Flow Pattern Analysis. In the main paper, we analyzed the distribution patterns of image attention scores for different Single Transformer-based MLLMs and modular MLLMs. The results showed that Single Transformer-based MLLMs allocate more attention weights to image tokens. However, this could be due to different models processing varying numbers of image tokens, where more image tokens lead to higher aggregated attention scores.", + "bbox": [ + 511, + 405, + 905, + 541 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To analyze this in a more controlled manner, we designed an additional experiment. Using the data scaling setup at 512M, we pretrained SAIL and its modular MLLM counterpart. 
After pretraining, we fine-tuned both models using the LLaVA-mix-665K dataset, fixing the resolution size to 224x224 during SFT, instead of using any resolution.", + "bbox": [ + 511, + 541, + 905, + 647 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The results, shown in Figure 6, reveal that SAIL allocates higher attention scores to image tokens across all transformer layers compared to the modular MLLM, particularly in medium layers $(+43.5\\%)$ in layer 14) and deep layers $(+41.2\\%)$ in layer 31).", + "bbox": [ + 511, + 648, + 905, + 724 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "From this, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism.", + "bbox": [ + 511, + 724, + 905, + 816 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Attention Map Visualization. In the main paper, we found that Single Transformer-based MLLMs allocate a large portion of attention weights to image tokens during inference, indicating a more vision-centric model. Here, we visualize the attention distribution of SAIL across different regions of the image when predicting tokens.", + "bbox": [ + 511, + 816, + 905, + 907 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The results in Figure 7 illustrate the attention maps for", + "bbox": [ + 532, + 907, + 903, + 924 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 946, + 504, + 958 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/0c8b865553e262229522d49de1c266ad4d74ff2efba965f1aceb91bc26705e7b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ExpModelLLMStage 1Stage 2SFT
DataLRDataLRDataLR
Figure 1(A)SAIL, point 32MMistral-7B-v0.1Standard Stage 1 Data (32M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(A)SAIL, point 128MMistral-7B-v0.1Standard Stage 1 Data (128M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(A), Table 6SAIL, point512MMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(B), Table 2SAILMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)Standard Stage 2 Data (1e-5, 5e-6)Standard SFT Data(1e-5,0)
Table 3, 4, 5SAILMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)----
Figure 3, Table 7SAIL-0.5BQwen2.5-0.5BStandard Stage 1 Data (128M image-text pairs) (5e-4, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 3SAIL-3BQwen2.5-3BStandard Stage 1 Data (128M image-text pairs) (1e-4, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 3SAIL-7BMistral-7B-v0.1Standard Stage 1 Data (128M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
", + "bbox": [ + 94, + 111, + 903, + 218 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/42cf6443894186feb0a1d9dae9b8337f49f0ae9fad351b32d6fc399fbad551af.jpg", + "table_caption": [ + "Table 8. Experimental Configurations for Various Settings. The table lists the models used, the specific LLM variants, the datasets, and learning rates (LR) applied during each training stage (Pretraining Stage 1, Pretraining Stage 2, and SFT). \"Standard Stage 1 Data\", \"Standard Stage 2 Data\" and \"Standard SFT Data\" are listed in Table 1. Specific points and tables/figures referred to in the text are also indicated." + ], + "table_footnote": [], + "table_body": "
ModelVQAv2GQASciQA-IMGTextVQAPOPEMMEMMBenchSEEDBench-IMGNorm(avg
Figure 1, modular MLLM, 32M76.9658.768.4858.6888.17159969.4470.3161.41
Figure 1, modular MLLM, 128M78.4759.7870.0559.8286.78163868.5768.1161.52
Figure 1, modular MLLM, 512M80.0662.3870.3457.8583.14137970.8269.8361.86
Figure 1, SAIL, 32M70.5157.9563.3231.6781.77142148.2261.5151.93
Figure 1, SAIL, 128M76.3660.9362.6156.8685.5145853.9466.6057.91
Figure 1, SAIL, 512M78.5162.0667.4863.9486.04153056.7168.8360.51
Figure 3, SAIL-3B67.353.263.830.966.9820.844.655.447.80
Figure 3, SAIL-0.5B59.146.959.620.159.8761.4538.535.139.92
", + "bbox": [ + 155, + 296, + 844, + 419 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/63cbc17f7f3795c8081971b405697dd160c73dc06136bdfd24067ac6eaeeb4cf.jpg", + "table_caption": [ + "Table 9. Detailed experimental results in the main paper." + ], + "table_footnote": [], + "table_body": "
MethodPretrainSFTVQAv2GQASciQA-IMGTextVQAPOPEMMBenchSEEDbenchDocVQAChartQAAI2DMMStaravg
LLaVA-1.5-336px [47]12.8B+558K665K78.562.066.858.285.964.366.128.118.254.832.458.3
SAIL512M665K77.861.668.056.486.661.369.829.321.558.737.159.1
", + "bbox": [ + 153, + 457, + 843, + 500 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 10. Comparison of SAIL and LLaVA1.5. We evaluate the models on VQAv2 [28], GQA [36], ScienceQA [52], TextVQA [65], POPE [44], MMBench [50], SEEDBench [40], DocVQA [56], ChartQA [55], AI2D [39] and MMStar [9].", + "bbox": [ + 89, + 508, + 906, + 539 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "specific tokens to the image portion when SAIL generates predictions for multimodal queries. The visualizations show that in the early transformer layers, the predicted tokens primarily focus on the salient regions of the image. As the model progresses to deeper layers, the attention shifts to areas more relevant to the predicted tokens. This behavior demonstrates that SAIL has the potential to function as a grounding model, effectively correlating text tokens with their corresponding image regions.", + "bbox": [ + 89, + 564, + 482, + 700 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In other words, during inference, the model incrementally concentrates attention weights on relevant regions, aiding in decision-making. This progressive focusing of attention signifies the model's capability to ground text tokens in the corresponding visual context, enhancing its performance in vision-language tasks.", + "bbox": [ + 89, + 700, + 482, + 791 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Visual Understanding Demonstration. We investigate several vision perception and reasoning capabilities of our SAIL. 
These include its ability to understand rich OCR information (Table 11), interpret real-world scenes (Table 12), comprehend scientific charts (Table 13), and analyze poster contents (Table 14).", + "bbox": [ + 89, + 791, + 482, + 881 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/eb6a28a28a20b99c926f16b7385ebc1e417e5d659893ab587113a8b6f7597b5d.jpg", + "image_caption": [ + "(A) Are there patterns on the easter eggs?" + ], + "image_footnote": [], + "bbox": [ + 183, + 161, + 305, + 258 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/7637efd9ee2d43b4d77ee2d1c219080912e7103759b698c9370945ebd8bfd6db.jpg", + "image_caption": [ + "GT: Yes; No" + ], + "image_footnote": [], + "bbox": [ + 313, + 162, + 436, + 258 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/a1c62c760920c7d865b90215f1f47dbfe4133eedf1aaf8f9b80698512acfbfef.jpg", + "image_caption": [ + "SAIL: Yes; No", + "(C) Are the birds flapping upward or downward?", + "GT: Upward; Downward", + "SAIL: Upward; Downward", + "LLaVA1.5: Upward; Upward", + "Figure 5. Comparison of SAIL and LLaVA1.5 on MMVP examples. SAIL demonstrates better performance in perceiving minor regions and objects, as well as more accurately distinguishing object states." 
+ ], + "image_footnote": [], + "bbox": [ + 184, + 327, + 308, + 422 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d6cdb7ea2a12e15ebee32a5cc29d4041d12e9a7a69f66eeb92347e9b7ed99910.jpg", + "image_caption": [ + "LLaVA1.5: Yes; Yes" + ], + "image_footnote": [], + "bbox": [ + 320, + 327, + 442, + 422 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/9064d8a14fd89c407062d6f65d9550a3b3b10c965b254fe31126fd6726721b04.jpg", + "image_caption": [ + "(B) Are there any words displayed on the vehicle's lightbar?", + "GT: Yes; No" + ], + "image_footnote": [], + "bbox": [ + 532, + 162, + 656, + 258 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/c743f0fe8f49bafa26967c3602456ccc131b4c41de5f8eefbc16ca5bcc4d4e12.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 162, + 787, + 258 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/5cf82c40e2027f2eec837df6873535efaf1d21cde37f0251e7f842f4a80e5417.jpg", + "image_caption": [ + "SAIL: Yes; No", + "LLaVA1.5: Yes; Yes", + "(D) Is the elephant's trunk raised or lowered?", + "GT: Raised; Lowered", + "SAIL: Raised; Lowered", + "LLaVA1.5: Lowered; Lowered" + ], + "image_footnote": [], + "bbox": [ + 535, + 325, + 658, + 421 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d74c79b2594ba8ce53efc20e4751a99dfed385f78b166c083f55da888145dbd3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 325, + 787, + 421 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/86ea22e9302f393c6faa266a08d690c1d93155388ab664b38df9f56bb5bac5f9.jpg", + "image_caption": [ + "Figure 6. Image attention score allocation for SAIL and its modular MLLM counterpart. We compared the attention score allocation distribution for shallow layers, medium layers, and deep layers between these two models. 
The Single Transformer-based MLLM model significantly allocates a higher proportion of attention score to image tokens during prediction than the modular MLLM." + ], + "image_footnote": [], + "bbox": [ + 254, + 602, + 743, + 825 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Query: When was the travel agency founded?", + "text_level": 1, + "bbox": [ + 326, + 277, + 663, + 292 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/72403d5df5d4cd1878d5e33c5bd4d9fa9dd848225e493287b525c8fda5a85e1d.jpg", + "image_caption": [ + "Output token: \"2\", Layer 2" + ], + "image_footnote": [], + "bbox": [ + 223, + 299, + 493, + 435 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/ca7b4d2d7629b3566f0d32cb2b1bd1e264daec4f37ae471c87d0ab3de85774dd.jpg", + "image_caption": [ + "Output token: “2”, Layer 25" + ], + "image_footnote": [], + "bbox": [ + 504, + 299, + 772, + 435 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Query: What color is the Bicycle?", + "text_level": 1, + "bbox": [ + 372, + 482, + 617, + 496 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/14245205aa410e48bd6a91506bad91fa760f5c60d89237bfedd1e75ea248e219.jpg", + "image_caption": [ + "Output token: \"red\", Layer 0" + ], + "image_footnote": [], + "bbox": [ + 251, + 500, + 486, + 698 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/45ceef6727c7a3c12e1dcef96d22949a055822dcf3dcdc91bdcc1b96cfdab9b9.jpg", + "image_caption": [ + "Output token: \"red\", Layer 31", + "Figure 7. Visualization of SAIL's attention distribution across image regions during token prediction. In early transformer layers, attention primarily focuses on the salient regions of the image. As the model progresses to deeper layers, attention shifts to areas more relevant to the predicted tokens." 
+ ], + "image_footnote": [], + "bbox": [ + 511, + 500, + 746, + 698 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Example 1: Understanding OCR Information in Webpage.", + "text_level": 1, + "bbox": [ + 122, + 306, + 508, + 321 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/20f7c01d41fb79fbaccfe67a2b0667fe76827939bc0931c034efc63a00dd6df2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 334, + 776, + 508 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "User Please output all the text information.", + "text_level": 1, + "bbox": [ + 138, + 511, + 377, + 523 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "SAIL Cornell University arXiv cs arXiv:2304.08485 Computer Science Computer Vision and Pattern Recognition Visual Instruction Tuning HaoTian Liu, Chunyuan Li, Qingyang Wu, Yong Jae Lee Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLM for general-purpose visual and language visual and language this http URL URL Early Experiments: LLMs demonstrate impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLAVA and GPT-4 achieves a new state-of-the-art accuracy of $92.53\\%$ . We make GPT-4 generated visual instruction tuning data, our model and code base publicly available. 
Comments: NeurIPS 2023 Oral; project page: this https URL Subjects: Computer Vision and Pattern Recognition (cs.CV); Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Machine Learning (cs.LG) Cite as: arXiv:2304.08485 [cs.CV] (or arXiv:2304.08485v2 [cs.CV] for this version) https://doi.org/10.48550/arXiv.2304.08485 Submission history From: Haotian Liu [view email] [v1] Mon, 17 Apr 2023 17:59:25 UTC (4,360 KB) [v2] Mon, 11 Dec 2023 17:46:14 UTC (4,985 KB)", + "bbox": [ + 138, + 527, + 849, + 686 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 11. SAIL demonstrates a strong capability to extract information in OCR-rich scenarios.", + "bbox": [ + 215, + 717, + 772, + 731 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Example 2: Understanding Real-World Scene.", + "text_level": 1, + "bbox": [ + 124, + 244, + 426, + 260 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/cbf681ad40eed4080496a59ec1cab9482e6085a739b47fb697bc4e5fa99eabde.jpg", + "image_caption": [], + "image_footnote": [ + "User Please describe the image in detail." + ], + "bbox": [ + 179, + 271, + 774, + 569 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "SAIL There are several notable elements in the image:", + "text_level": 1, + "bbox": [ + 138, + 589, + 433, + 601 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "1. The highway is multi-lane, with multiple lanes visible. 2. The traffic is dense, with vehicles in various lanes. 3. The weather seems to be overcast, as indicated by the grayish tones of the image. 4. There are several traffic signs and signs visible, including a green highway sign indicating \"1/2 mile\" to the right, and a green sign with the number \"16\" and the word \"MILE\" on the left side of the image. 5. The background includes a hill or mountain, with trees and greenery visible. 6. 
There are also several power lines and electrical pylons visible along the sides of the highway. The image's focus is on the traffic and the surrounding environment. The vehicles are primarily in the center and right lanes of the highway, with some vehicles in the left lanes as well. The highway is flanked by lush greenery, including trees and grassy areas, suggesting that this is a rural or possibly a suburban area. The traffic appears to be moving at a moderate pace. The weather conditions suggest that this is a daytime scene, possibly during the morning or late afternoon. The image is taken from a perspective that suggests the viewer is in a vehicle on the highway, observing the traffic ahead. In summary, the image captures a typical highway scene with moderate traffic, surrounded by a mix of natural and built environments, under an overcast sky. The road is in good condition, and the vehicles are traveling smoothly in both directions.", + "bbox": [ + 174, + 601, + 849, + 746 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 12. 
SAIL demonstrates a strong capability about understanding the real-world scene.", + "bbox": [ + 225, + 779, + 761, + 792 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Example 3: Understanding the Scientific Charts.", + "text_level": 1, + "bbox": [ + 122, + 233, + 442, + 250 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/d4d1f138be91e3ae0a2bdd84b923df543b8646ec19b28b01a2a51525de73dedb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 260, + 475, + 391 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/61e34a4823cfcf7abfbd05b2e685db45e3739a868869559e15d3920fe25b1d5f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 392, + 367, + 455 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/6e114755f6e4fbd24b51e09e065d2fd1a5dd224fab94ab7cfcd565e63799f6e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 392, + 473, + 455 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/2aa1f308a5e4d1cfb777d32372c4c543ebda75253e771dd662da3c40a4a9b1bf.jpg", + "image_caption": [ + "User Please describe the image in detail." + ], + "image_footnote": [ + "This image is a collage of various objects, foods, and animals, all of which are predominantly blue or have blue elements. The image is divided into sections labeled with letters from A to O. Here is a detailed description of each section: 1. A - Pumpkin: A blue pumpkin is depicted, which is a common symbol associated with autumn and Halloween. 2. B - Fish: A blue fish is shown, which could be a representation of a common aquatic animal. 3. C - M&M: A blue M&M candy is present.4. D - Milk and Soap: A glass of milk, a bar of soap, and a blue sponge.5. E - Bread and Jelly Beans: Blue wheat, a blue loaf of bread, and blue jelly beans.6. 
F - Castor Oil: A bottle of castor oil.7. G - Cheerios and Snacks: A box of Cheerios, a glass of a dark blue drink, a blue popsicle, a blue donut, and a blue muffin. 8. H - Vegetables: A collection of vegetables including a blue cucumber, green leafy vegetables, and a bunch of blue carrots.9. I - Apple and Stones: A green apple, a blue candy, and blue stones.10. J - Syrup and Candy: Sugar cubes, a bottle of blue syrup, and a blue candy bar.11. K - Coconut and Aloe: A blue coconut, a bottle of blue liquid, and a blue aloe vera plant.12. L - Pineapple and Juice: A blue pineapple, a blue banana, and a glass of blue juice.13. M - Candy Bar: A blue candy bar.14. N - Blueberries: Blueberries.15. O - Oval Object: A blue oval-shaped object." + ], + "bbox": [ + 181, + 455, + 344, + 598 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/88ca0006929491eddb21a3c71ddbb465d70cdc0896585136bc6f707c683132d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 346, + 455, + 475, + 598 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/7f811cf5851728d6bfd1446690b26f8821065017714d04d06116c144ae5224a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 477, + 260, + 774, + 359 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/5c84f7550aeaf587e504883e32f2563a103b1875b315867b44936956a2766de4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 477, + 359, + 625, + 444 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/c3f078f825632a0a8ecfc0f90209ccb5bb4ce6fa98f6b3253c233d20f4b9b7d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 359, + 771, + 444 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/476bc9f3b006d0369e3d41f7a4f60b0559aa33a82c981e4c5ddb4b3946ce37ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 477, + 445, + 771, + 513 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": 
"images/957eb719d56772c3ecd3d2e4f2915db952b9755d843d25088697d3eafffc2835.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 477, + 513, + 772, + 604 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 13. SAIL demonstrates a strong capability about understanding the scientific charts.", + "bbox": [ + 227, + 790, + 759, + 804 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Example 4: Understanding Poster Content.", + "text_level": 1, + "bbox": [ + 122, + 258, + 405, + 275 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/fd5dd7896f1375e93dd6ec6d4a24135dfe379dae2d45342a2a5c556982984744.jpg", + "image_caption": [ + "User Please describe the image in detail." + ], + "image_footnote": [], + "bbox": [ + 179, + 282, + 591, + 532 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "SAIL At the top of the image, there is a decorative border with a pattern of hanging golden ornaments, which adds a festive and celebratory feel to the overall design. The background is plain white, which helps the colorful elements stand out. In the center of the image, the text \"Happy Sankranti\" is prominently displayed in a large, elegant, and stylized font. The text is in a warm orange color, which is traditionally associated with the festival. Below the text, there is a depiction of a woman dressed in traditional Indian attire, likely a saree, in shades of blue and green. She is engaged in the act of preparing a traditional Sankranti sweet, which is a common practice during the festival. The sweet is shaped like a pyramid and is decorated with intricate patterns, reflecting the artistic and cultural heritage of the region. To the left of the woman, there is a three-tiered pot, known as a \"patala\", which is a traditional Indian cooking vessel. 
The pot is adorned with a colorful design and is placed on a colorful circular mat, which is also a traditional element in Indian households. On the right side of the woman, there is a small hut with a thatched roof, which is a common architectural style in rural Indian villages. The hut is depicted in a simple and rustic manner, emphasizing the rural and traditional aspect of the scene. Overall, the image captures the essence of the Sankranti festival, highlighting the cultural and religious aspects of the celebration. The use of traditional clothing, the preparation of traditional sweets, and the depiction of a rural village scene all contribute to a sense of cultural authenticity and celebration.", + "bbox": [ + 138, + 550, + 849, + 720 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 14. SAIL demonstrates a strong capability about understanding the poster content.", + "bbox": [ + 233, + 763, + 753, + 777 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 113, + 187, + 128 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 2", + "[2] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 2024. 1", + "[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023.1, 2, 6", + "[4] Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. 
Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021.3", + "[5] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Sagnak Tasirlar. Introducing our multimodal models, 2023. 2, 5, 6", + "[6] Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12m: Pushing web-scale image-text pretraining to recognize long-tail visual concepts. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3558-3568, 2021. 3", + "[7] Jun Chen, Deyao Zhu, Xiaogian Shen, Xiang Li, Zechun Liu, Pengchuan Zhang, Raghuraman Krishnamoorthi, Vikas Chandra, Yunyang Xiong, and Mohamed Elhoseiny. Minigpt-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478, 2023. 2", + "[8] Jieneng Chen, Qihang Yu, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Vitamin: Designing scalable vision models in the vision-language era. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3", + "[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 5, 6, 10", + "[10] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. In European Conference on Computer Vision, pages 19-35. Springer, 2024. 7", + "[11] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. Transactions on Machine Learning Research, 2024. 1, 2, 3, 4, 5, 6, 7", + "[12] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 
5, 6", + "[13] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing" + ], + "bbox": [ + 93, + 138, + 482, + 922 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the gap to commercial multimodal models with open-source suites. Science China Information Sciences, 67(12):220101, 2024. 6", + "[14] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 2, 3, 6", + "[15] Mehdi Cherti, Romain Beaumont, Ross Wightman, Mitchell Wortsman, Gabriel Ilharco, Cade Gordon, Christoph Schuhmann, Ludwig Schmidt, and Jenia Jitsev. Reproducible scaling laws for contrastive language-image learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2818-2829, 2023. 1, 2, 3, 6, 8", + "[16] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhang-hao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yong-hao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. Vicuna: An open-source chatbot impressing gpt-4 with $90\\%$ * chatgpt quality, 2023. 1", + "[17] MMSegmentation Contributors. MMSegmentation: Openmmlab semantic segmentation toolbox and benchmark. https://github.com/open-mmlab/mmsegmentation, 2020.5", + "[18] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
1", + "[19] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3, 6", + "[20] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 3, 5", + "[21] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. In Advances in Neural Information Processing Systems, pages 52545-52567. Curran Associates, Inc., 2024. 1, 2, 3, 4, 5, 6, 7", + "[22] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Evev2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025. 5, 6", + "[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3" + ], + "bbox": [ + 516, + 116, + 903, + 922 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 506, + 958 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 
6", + "[25] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pretraining of large autoregressive image models. arXiv preprint arXiv:2401.08541, 2024. 5", + "[26] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. Eva: Exploring the limits of masked visual representation learning at scale. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 19358-19369, 2023. 3", + "[27] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. Datacomp: In search of the next generation of multimodal datasets. Advances in Neural Information Processing Systems, 36:27092-27112, 2023. 3", + "[28] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 8, 9, 10", + "[29] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 4, 5", + "[30] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024. 5, 6", + "[31] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. 
Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 3", + "[32] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 5", + "[33] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. arXiv preprint arXiv:2312.08914, 2023. 2", + "[34] Xinyu Huang, Youcai Zhang, Jinyu Ma, Weiwei Tian, Rui Feng, Yuejie Zhang, Yaqian Li, Yandong Guo, and Lei" + ], + "bbox": [ + 91, + 114, + 483, + 922 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zhang. Tag2text: Guiding vision-language model via image tagging. arXiv preprint arXiv:2303.05657, 2023. 3", + "[35] Zilong Huang, Qinghao Ye, Bingyi Kang, Jiashi Feng, and Haoqi Fan. Classification done right for vision-language pretraining. In NeurIPS, 2024. 3", + "[36] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019. 8, 9, 10", + "[37] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 3", + "[38] Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023. 
1", + "[39] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016. 5, 6, 10", + "[40] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 5, 6, 8, 9, 10", + "[41] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 3, 5, 6, 7", + "[42] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training. In NeurIPS, 2023. 3", + "[43] Xianhang Li, Haoqin Tu, Mude Hui, Zeyu Wang, Bingchen Zhao, Junfei Xiao, Sucheng Ren, Jieru Mei, Qing Liu, Huangjie Zheng, Yuyin Zhou, and Cihang Xie. What if we recapture billions of web images with llama-3? arXiv preprint arXiv:2406.08478, 2024. 4, 5", + "[44] Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 6, 9, 10", + "[45] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23390-23400, 2023. 3", + "[46] Zijing Liang, Yanjie Xu, Yifan Hong, Penghui Shang, Qi Wang, Qiang Fu, and Ke Liu. A survey of multimodel large language models. In Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, pages 405-409, 2024. 5, 6, 8, 9", + "[47] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 
6, 7, 9, 10" + ], + "bbox": [ + 516, + 114, + 903, + 921 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[48] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava next: Improved reasoning,OCR, and world knowledge, 2024.3,6", + "[49] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 1, 2, 3", + "[50] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2024. 5, 6, 8, 9, 10", + "[51] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024. 5, 6", + "[52] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 5, 6, 8, 9, 10", + "[53] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5, 6", + "[54] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jiawen Liu, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the boundaries of monolithic multimodal large language models with endogenous visual pre-training. arXiv preprint arXiv:2410.08202, 2024. 
1, 2, 3, 5, 6", + "[55] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 10", + "[56] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 10", + "[57] Sachin Mehta, Maxwell Horton, Fartash Faghri, Mohammad Hossein Sekhavat, Mahyar Najibi, Mehrdad Farajtabar, Oncel Tuzel, and Mohammad Rastegari. Catlip: Clipsevel visual recognition accuracy with 2.7 x faster pretraining on web-scale image-text data. arXiv preprint arXiv:2404.15653, 2024. 3", + "[58] Meta. Introducing meta llama 3: The most capable openly available llm to date, 2024. Accessed: 2024-04-18. 1", + "[59] OpenAI. Introducing chatgpt. OpenAI Blog, 2021.", + "[60] OpenAI. Gpt-4 technical report, 2023. 1", + "[61] OpenAI. Gpt-4v(ision) system card, 2023. 1, 2", + "[62] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 1, 2, 3, 6, 8" + ], + "bbox": [ + 91, + 114, + 482, + 922 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[63] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. LAION-5B: An open large-scale dataset for training next generation image-text models. In NeurlPS, 2022. 3, 4, 5", + "[64] Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatronlm: Training multi-billion parameter language models using model parallelism. 
arXiv preprint arXiv:1909.08053, 2019. 5", + "[65] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 5, 6, 8, 9, 10", + "[66] Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama. https://cerebras.ai/blog/slimpajama-a-627b-token-cleaned-and-deduplicated-version-of-redpajama, 2023.4.5", + "[67] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 2, 6", + "[68] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 2", + "[69] Peter Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Adithya Jairam Vedagiri IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. Advances in Neural Information Processing Systems, 37:87310-87356, 2024. 6", + "[70] Shengbang Tong, Zhuang Liu, Yuexiang Zhai, Yi Ma, Yann LeCun, and Saining Xie. Eyes wide shut? exploring the visual shortcomings of multimodal llms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9568-9578, 2024. 9", + "[71] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 3, 8", + "[72] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. 
Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 6", + "[73] Haochen Wang, Junsong Fan, Yuxi Wang, Kaiyou Song, Tiancai Wang, Xiangyu Zhang, and Zhaoxiang Zhang. Bootstrap masked visual modeling via hard patches mining. arXiv preprint arXiv:2312.13714, 2023. 5", + "[74] Haochen Wang, Kaiyou Song, Junsong Fan, Yuxi Wang, Jin Xie, and Zhaoxiang Zhang. Hard patches mining for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10375-10385, 2023. 5" + ], + "bbox": [ + 516, + 114, + 903, + 922 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[75] Haochen Wang, Anlin Zheng, Yucheng Zhao, Tiancai Wang, Ge Zheng, Xiangyu Zhang, and Zhaoxiang Zhang. Reconstructive visual instruction tuning. In International Conference on Learning Representations, 2025. 2", + "[76] Jiacong Wang, Bohong Wu, Haiyong Jiang, Zhou Xun, Xin Xiao, Haoyuan Guo, and Jun Xiao. World to code: Multimodal data generation via self-instructed compositional captioning and filtering. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4608-4623, 2024. 1", + "[77] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 1, 2, 3, 7", + "[78] Wenxuan Wang, Quan Sun, Fan Zhang, Yepeng Tang, Jing Liu, and Xinlong Wang. Diffusion feedback helps clip see better. arXiv preprint arXiv:2407.20171, 2024. 3", + "[79] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. 
Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 2, 3, 6", + "[80] Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. SimVLM: Simple visual language model pretraining with weak supervision. In International Conference on Learning Representations, 2022. 3", + "[81] X.ai. Grok-1.5 vision preview. https://x.ai/blog/grok-1.5v, 2024.5, 6", + "[82] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 5, 6", + "[83] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 3", + "[84] Qiying Yu, Quan Sun, Xiaosong Zhang, Yufeng Cui, Fan Zhang, Yue Cao, Xinlong Wang, and Jingjing Liu. Capsfusion: Rethinking image-text data at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14022-14032, 2024. 4, 5", + "[85] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 5, 6", + "[86] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? arXiv preprint arXiv:2210.01936, 2022. 5, 6", + "[87] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 
3, 7, 8", + "[88] Tao Zhang, Xiangtai Li, Zilong Huang, Yanwei Li, Weixian Lei, Xueqing Deng, Shihao Chen, Shunping Ji, and Jiashi" + ], + "bbox": [ + 91, + 114, + 483, + 924 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Feng. Pixel-sail: Single transformer for pixel-grounded understanding. arXiv, 2025. 6", + "[89] Youcai Zhang, Xinyu Huang, Jinyu Ma, Zhaoyang Li, Zhaochuan Luo, Yanchun Xie, Yuzhuo Qin, Tong Luo, Yaqian Li, Shilong Liu, et al. Recognize anything: A strong image tagging model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1724-1732, 2024. 3", + "[90] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 5", + "[91] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 
1, 2, 3" + ], + "bbox": [ + 516, + 114, + 903, + 354 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_model.json b/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a48ae55cab168b92a7c420c0778b478250aa23a7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_model.json @@ -0,0 +1,3694 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.091, + 0.089, + 0.318, + 0.11 + ], + "angle": 0, + "content": "ByteDance | Seed" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.131, + 0.882, + 0.177 + ], + "angle": 0, + "content": "The Scalability of Simplicity: Empirical Analysis of Vision-Language Learning with a Single Transformer" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.203, + 0.731, + 0.275 + ], + "angle": 0, + "content": "Weixian Lei* Jiacong Wang* Haochen Wang* \nXiangtai Li Jun Hao Liew Jiashi Feng Zilong Huang† \n*Equal contribution, † Project Lead \nBytedance Seed" + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.309, + 0.327, + 0.325 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.341, + 0.486, + 0.673 + ], + "angle": 0, + "content": "This paper introduces SAIL, a single transformer unified multimodal large language model (MLLM) that integrates raw pixel encoding and language decoding within a singular architecture. Unlike existing modular MLLMs, which rely on a pre-trained vision transformer (ViT), SAIL eliminates the need for a separate vision encoder, presenting a more minimalist architecture design. 
Instead of introducing novel architectural components, SAIL adapts mix-attention mechanisms and multimodal positional encodings to better align with the distinct characteristics of visual and textual modalities. We systematically compare SAIL's properties—including scalability, cross-modal information flow patterns, and visual representation capabilities—with those of modular MLLMs. By scaling both training data and model size, SAIL achieves performance comparable to modular MLLMs. Notably, the removal of pretrained ViT components enhances SAIL's scalability and results in significantly different cross-modal information flow patterns. Moreover, SAIL demonstrates strong visual representation capabilities, achieving results on par with ViT-22B in vision tasks such as semantic segmentation. Code and models are available1." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.702, + 0.222, + 0.717 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.727, + 0.484, + 0.879 + ], + "angle": 0, + "content": "The pursuit of multimodal intelligence has driven the development of Multimodal Large Language Models (MLLMs) [49, 61, 68], which typically adopt a modular design: a pre-trained vision encoder (e.g., CLIPViT [15, 62]) extracts image features, a Large Language Model (LLM) [2, 16, 38, 58-60] processes text, and a lightweight projector aligns the two modalities. This framework achieves strong performance through multi-stage pretraining, supervised fine-tuning (SFT), and post-training on multimodal datasets [3, 18, 49, 76, 77, 91]. 
While effective," + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.318, + 0.715, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.615, + 0.463, + 0.636, + 0.475 + ], + "angle": 0, + "content": "(A)" + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.318, + 0.905, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.805, + 0.463, + 0.825, + 0.475 + ], + "angle": 0, + "content": "(B)" + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.486, + 0.907, + 0.584 + ], + "angle": 0, + "content": "Figure 1. (A) Data scaling curve for Modular Multimodal Large Language Model (MLLM) and SAIL, our Single Transformer-based MLLM. As pretraining data increases, the single transformer SAIL shows a sharper performance gain, demonstrating its superior data scalability. (B) Comparison to existing Single Transformer-based MLLMs: our SAIL pushes the performance boundaries on both vision tasks and vision-language tasks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.595, + 0.906, + 0.656 + ], + "angle": 0, + "content": "this modular MLLM paradigm inherently fragments multimodal processing, reinforces reliance on pretrained visual encoders, which may limit deployment flexibility and scalability [11, 21, 54]." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.657, + 0.907, + 0.853 + ], + "angle": 0, + "content": "A promising alternative is to eliminate the visual encoder entirely and process raw image patches and text tokens within a single Transformer. This unified architecture removes modality-specific modules, enabling parameter sharing and end-to-end learning of vision-language interactions. Previous works [11, 21, 54] have primarily explored the architecture design, training data, and methods of Single Transformer-based MLLMs. 
However, little exploration has been given to their fundamental properties, such as scalability, cross-modal information flow patterns, and visual representation capabilities. A deeper understanding of these properties is crucial for unlocking the full potential of Single Transformer-based MLLMs." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.909, + 0.901 + ], + "angle": 0, + "content": "In this work, we present an experimental analysis of the fundamental properties of Single Transformer-based MLLMs and compare them to modular MLLMs (e.g." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.10462v1 [cs.CV] 14 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.887, + 0.379, + 0.9 + ], + "angle": 0, + "content": "1https://github.com/bytedance/SAIL" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.482, + 0.189 + ], + "angle": 0, + "content": "LLaVA [49]). Additionally, in the absence of a pre-trained visual encoder, Single Transformers have to learn visual representations from scratch. Thus, an intriguing question arises: can a trained Single Transformer emerge as a strong vision encoder?" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.193, + 0.483, + 0.403 + ], + "angle": 0, + "content": "We conduct a series of experiments to train and study our Single trAnformer model for vIsion and Language (SAIL). While we do not propose novel architecture designs, we introduce necessary modifications to enable the model to process different modalities in a unified architecture. 
In its micro architecture design, we address the different spatial characteristics of 2D images and 1D text data by employing a mixed attention mechanism: bidirectional attention for image patches and causal attention for text tokens, combined with multimodal rotary position embedding. Through model and data scaling, SAIL achieves performance on vision-language benchmarks comparable to modular MLLMs, while also functioning as a high-performing vision backbone, as shown in Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.405, + 0.483, + 0.558 + ], + "angle": 0, + "content": "More concretely, our empirical analysis uncovers following striking advantages of Single Transformer architectures: (i) Superior Data Scaling: In controlled experiments, SAIL exhibits steeper performance gains as pretraining data scales. While LLaVA-style modular MLLMs initially perform well, our model's performance becomes very close to theirs when pretrained on 512M samples, as shown in Figure 1(A). This suggests that unified architectures can effectively leverage large-scale data and potentially match the performance of modular MLLMs." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.56, + 0.483, + 0.68 + ], + "angle": 0, + "content": "(ii) Vision Centric Information Flow Pattern: Through analysis of attention distributions, we observe that Single Transformers assign significantly higher attention scores to image tokens during token prediction compared to modular MLLMs. This indicates that the information flow in Single Transformer MLLMs is more direct, with visual tokens influencing prediction tokens more prominently, highlighting a vision-centric approach to decision-making." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.682, + 0.483, + 0.847 + ], + "angle": 0, + "content": "(iii) Vision Encoder Functioning: Our experiments further demonstrate that the pretrained Single Transformer inherently serves as a powerful vision encoder. 
Comprehensive evaluations on vision-centric tasks, such as image classification and semantic segmentation, show that the model learns rich visual representations during multimodal pretraining. These representations enhance its capacity for both semantic-level comprehension (e.g., object categorization) and pixel-level understanding (e.g., fine-grained segmentation masks), bridging high-level abstraction and low-level visual reasoning within a unified architecture." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.849, + 0.483, + 0.925 + ], + "angle": 0, + "content": "In summary, our findings indicate that Single Transformer-based MLLMs hold great promise in surpassing modular MLLMs in terms of leveraging large-scale data, forming direct vision-centric information pathways, and functioning as effective vision encoders. We hope" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.115, + 0.905, + 0.176 + ], + "angle": 0, + "content": "our empirical findings inspire further research to refine and enhance Single Transformer architecture, ultimately driving advancements in multimodal intelligence from a new perspective." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.193, + 0.655, + 0.209 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.22, + 0.897, + 0.236 + ], + "angle": 0, + "content": "2.1. Paradigms in Vision-Language Model Design" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.244, + 0.905, + 0.605 + ], + "angle": 0, + "content": "Modular MLLMs with Visual Encoders. The prevailing approach in MLLM design employs modular architectures [3, 49, 61, 68, 77] that rely on pretrained vision encoders (e.g., CLIP-ViT [15, 62], InternViT [14]) to process visual inputs. The visual features extracted from these frozen encoders are then aligned with LLM input spaces via linear [7, 49, 75, 91] or cross-attention layers [1, 33]. 
While this module design enables effective transfer of pretrained visual-language knowledge, it also introduces several limitations. First, incorporating a separate ViT encoder significantly slows down both training and inference, increasing deployment complexity and requiring costly infrastructure—especially when compared to a single transformer unified model. Second, common strategies for integrating visual features, such as direct mapping into LLM inputs [7, 49, 91] or sharing them across LLM layers [1, 33], often struggle to reconcile the inherent differences between images and text representations. Finally, as models scale, balancing the interactions between the encoder, LLM, and alignment layers becomes increasingly challenging [11, 54]. Thus, in this work, we explore a single transformer-based MLLM architecture that eliminates the ViT encoder and alignment components to overcome these challenges."
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.607,
      0.905,
      0.925
    ],
    "angle": 0,
    "content": "Single Transformer-based MLLMs Without Visual Encoders. Emerging research explores end-to-end architectures that process raw image patches and text tokens through a single Transformer, bypassing visual encoders entirely. These monolithic designs fall into two categories: continuous tokenization and discrete tokenization. Continuous tokenization, exemplified by Fuyu-8B [5] and SOLO [11], directly maps patches to LLM embeddings via linear projections, enabling flexible resolution handling but requiring massive pretraining data. Discrete tokenization, adopted by Chameleon [67] and Emu3 [79], employs VQ-VAE tokenizers to compress images into discrete tokens, trading pixel-level fidelity for generation capabilities. 
While later efforts such as EVE [21] and MonoInternVL [54] demonstrate the feasibility of encoder-free training, critical gaps remain: (1) Existing methods rely on extra designs and auxiliary loss [21], complicating training pipelines; (2) The scaling laws and fundamental properties of purely end-to-end trained models remain poorly understood; (3) Vision-language interaction in shared parameter spaces lacks systematic analysis—most prior MLLMs" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.482, + 0.22 + ], + "angle": 0, + "content": "default to causal attention for processing image-text sequences. In this work, we reveal that enabling bidirectional attention between image patches significantly enhances visual representation learning, addressing a key limitation in current designs. More importantly, our study bridges these gaps by establishing foundational principles for training scalable, self-contained single-transformer MLLMs." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.232, + 0.373, + 0.248 + ], + "angle": 0, + "content": "2.2. Vision Representation Learning" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.254, + 0.482, + 0.345 + ], + "angle": 0, + "content": "Learning effective vision representations is a core challenge in computer vision research, with extensive works [4, 19, 20, 26, 31, 80] dedicated to this problem. With the proliferation of large-scale web-sourced image-text datasets [6, 27, 63], recent methods leverage this data to train deep visual representations via three primary paradigms:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.346, + 0.483, + 0.466 + ], + "angle": 0, + "content": "Text as Classification Labels. Early methods used textual descriptions as weak supervision by extracting categorical labels from captions. 
For example, frameworks like Tag2Text [34] and RAM [89] used ViTs [23] to predict noun-based pseudo-labels from datasets like CC12M [6]. CatLIP [57] scaled labels to millions using object-centric supervision, and SuperClass [35] directly used tokenized text tokens as classification categories." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.467, + 0.483, + 0.557 + ], + "angle": 0, + "content": "Image-Text Contrastive Learning Contrastive pretraining, as exemplified by CLIP [15, 62] and ALIGN [37], aligns global image-text embeddings within a shared latent space. Subsequent works [8, 14, 42, 45, 78, 87] focused on enhancing CLIP's performance and improving training efficiency." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.558, + 0.483, + 0.753 + ], + "angle": 0, + "content": "Text as Autoregressive Targets. Caption generation as a pretext task is another approach for visual representation learning. SimVLM [80] trains encoder-decoder architectures to autoregressively predict captions, while CapPa [71] trains vision encoders through sequence prediction. These methods often retain modular designs or auxiliary components like contrastive losses [83]. Our work aligns with this category but removes architectural fragmentation by jointly modeling image patches and text tokens in a single Transformer. We find that the pre-trained Single Transformer learns transferable vision representations, enabling it to handle downstream multimodal understanding tasks and function as a vision encoder without modifications." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.768, + 0.483, + 0.804 + ], + "angle": 0, + "content": "3. SAIL: Training a Single Transformer for Vision and Language" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.811, + 0.279, + 0.825 + ], + "angle": 0, + "content": "3.1. 
Model Architecture"
  },
  {
    "type": "text",
    "bbox": [
      0.09,
      0.834,
      0.483,
      0.925
    ],
    "angle": 0,
    "content": "SAIL is built upon a unified Transformer architecture (Figure 2(A)) that processes multimodal inputs through streamlined, modality-specific preprocessing. For text, raw input is tokenized using the language model's tokenizer and then transformed into embeddings via the textual embedding module. For images, we partition the input into fixed-"
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.115,
      0.905,
      0.296
    ],
    "angle": 0,
    "content": "size patches and project them into continuous embeddings via a linear projection. Additionally, we maintain a list of special tokens explicitly designed for visual modality encoding: dedicated begin-of-image and end-of-image tokens mark the beginning and end of an image patch span, respectively. In multimodal scenarios, such as image-text pairs, these embeddings are concatenated into a single sequence and fed into the Transformer, enabling joint cross-modal interactions through unified self-attention layers. This design eliminates the need for modality-specific encoders, which efficiently processes heterogeneous data within a single transformer framework."
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.315,
      0.905,
      0.48
    ],
    "angle": 0,
    "content": "Bidirectional attention within image patches. While existing multimodal large language models (MLLMs) [41, 48, 49, 91] predominantly adopt causal attention for autoregressive sequence modeling, our experiments reveal that enabling full bidirectional attention among tokens from the same image significantly enhances visual representation learning and boosts downstream vision-language task performance. Note that previous Single Transformer works [11, 21, 54, 79] have only utilized causal attention, without exploring the potential of mixed attention mechanisms." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.499, + 0.905, + 0.696 + ], + "angle": 0, + "content": "As illustrated in Figure 2(B), for SAIL we implement a mixed attention scheme: (1) For text tokens, we preserve causal attention to maintain autoregressive generation capabilities, allowing each token to attend only to its predecessors. (2) For image tokens, we activate full bidirectional attention within each image patch group, empowering every visual token to interact with all others in the same image. This design captures holistic spatial relationships and contextual dependencies among visual elements, addressing the under-explored potential of attention mechanisms in cross-modal alignment. The improved interaction paradigm not only refines vision-language feature fusion but also provides stronger inductive biases for complex reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.713, + 0.905, + 0.925 + ], + "angle": 0, + "content": "Multimodal Rotary Position Embeddings. Following [77], we implement Multimodal RoPE (M-RoPE) in SAIL to harmonize positional modeling for multimodal inputs. The method decomposes positional encoding into two axes: height, and width. For text tokens, all axes share uniform position IDs (aligned with 1D-RoPE), whereas for images, height/width IDs adaptively map to token coordinates, as is shown in Fig 2(C). Notably, position indexing is sequentially initialized across modalities (e.g., starting from images before extending to subsequent text), preserving inter-modal consistency. M-RoPE not only improves positional sensitivity but also constrains absolute position magnitudes for visual tokens, facilitating robust generalization to extended sequences in inference." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.127, + 0.118, + 0.506, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.113, + 0.868, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.404, + 0.907, + 0.475 + ], + "angle": 0, + "content": "Figure 2. Model architecture and micro-designs for SAIL. (A) Model Architecture: SAIL is a unified transformer that processes both images and texts without extra module designs. (B) Mixed Attention Mechanism: we adopt bidirectional attention for image patches from the same image and causal attention for text tokens. Examples for a multimodal sequence and a text sequence are provided. Colored squares represent \"allow to attend\" and white squares indicate \"prevent from attending\". (C) Multimodal RoPE: an illustration of the multimodal rotary position embedding for SAIL, with examples for a multimodal sequence and a text sequence." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.487, + 0.218, + 0.504 + ], + "angle": 0, + "content": "3.2. Pretraining" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.511, + 0.483, + 0.557 + ], + "angle": 0, + "content": "We apply a two-stage curriculum to progressively strengthen the visual perception of SAIL while preserving its inherent language capabilities." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.558, + 0.483, + 0.725 + ], + "angle": 0, + "content": "Stage 1: Accelerated Visual Knowledge Acquisition. In this stage, we pretrain SAIL on large-scale image-text pairs to rapidly bootstrap its visual understanding. To maximize data throughput, we uniformly resize all images to a lower resolution (e.g., \\(224 \\times 224\\)), reducing multimodal sequence lengths and enabling the model to process more samples within fixed training time. 
To prevent catastrophic forgetting of linguistic knowledge, we interleave pure text corpora with multimodal data during training. This hybrid approach ensures efficient exposure to visual patterns while maintaining robust language proficiency." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.727, + 0.483, + 0.893 + ], + "angle": 0, + "content": "Stage 2: Enhancing Any-Resolution Image Understanding. Real-world applications require robustness to images of varying resolutions and aspect ratios, such as documents, charts, or infographics. Following prior works [11, 21], we extend pretraining with an any-resolution strategy: images retain their native resolutions during processing, and positional embeddings adapt dynamically to arbitrary spatial dimensions. This stage further refines SAIL's ability to model fine-grained visual details (e.g., tabular structures, text-rich graphics) while continuing to incorporate text-only data for language capability preservation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.894, + 0.483, + 0.924 + ], + "angle": 0, + "content": "Pretraining Objective. Throughout both stages, we optimize the standard language modeling loss only on text to-" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.488, + 0.907, + 0.518 + ], + "angle": 0, + "content": "kens. Image patches and special visual tokens are excluded from loss computation." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.562, + 0.73, + 0.579 + ], + "angle": 0, + "content": "3.3. Supervised Fine-tuning" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.596, + 0.907, + 0.716 + ], + "angle": 0, + "content": "During the Supervised Fine-tuning (SFT) stage, we train SAIL on publicly available, multi-source instruction datasets to enhance its understanding of complex linguistic instructions and diverse dialogue patterns critical for real-world deployment. 
This phase fine-tunes the entire network architecture, focusing on aligning the model's responses with human intent through exposure to varied instructional formats and multimodal interactions." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.728, + 0.909, + 0.925 + ], + "angle": 0, + "content": "Table 1 shows the details of training datasets for pretraining and supervised fine-tuning (SFT) across all stages. During Stage 1 pretraining, we utilized mixed multimodal and pure text datasets including Recap-DataComp-1B [43] and SlimPajama [66], with images at a resolution of \\(224\\mathrm{x}224\\) totaling 512M image-text pairs. In Stage 2, the pretraining datasets include Capfusion [84], self-curated OCR data from LAION COCO [63], InfinityMM Stage 2 subset [29], and SlimPajama, utilizing the any resolution (AnyRes) strategy, with a combined total of 86M image-text pairs along with text data. The SFT stage employed the InfinityMM Stage 3 subset, processed at any resolution, containing 6M image-text pairs." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.959 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.096, + 0.112, + 0.48, + 0.234 + ], + "angle": 0, + "content": "
StageDatasetImg.ResNum
Pretraining S1Recap-DataComp-1B [43]224x224512M
SlimPajama [66]-
Pretraining S2Capfusion [84]AnyRes60M
OCR from LAION COCO [63]7M
InifinityMM Stage 2 subset [29]19M
SlimPajama [66]-
SFTInifinityMM Stage3 [29]AnyRes6M
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.239, + 0.483, + 0.324 + ], + "angle": 0, + "content": "Table 1. Details of training datasets used across all stages. \"Img.Res\" refers to the image resolution settings applied during each training stage. All datasets listed are publicly available. Note that these settings represent the default configuration for standard SAIL training, while separate settings are used for scaling experiments and ablation studies." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.332, + 0.216, + 0.349 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.357, + 0.298, + 0.374 + ], + "angle": 0, + "content": "4.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.38, + 0.483, + 0.516 + ], + "angle": 0, + "content": "Evaluation Benchmarks. For evaluation of vision and language tasks, we evaluate SAIL and existing MLLMs on a broad range of multimodal benchmarks. Specifically, MLLM benchmarks encompass MMBench-EN [50], SEEDBench-IMG [40], MMVet [85], MME [46], HallusionBench [30], MathVistaMINI [53], and OCR-Bench [51]. Visual question answering benchmarks include TextVQA [65], ScienceQA-IMG [52], AI2D [39], MMStar [9], RealWorldQA [81]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.516, + 0.483, + 0.576 + ], + "angle": 0, + "content": "For evaluation of vision representation learning, we conduct experiments on ImageNet-1K [20] for image classification, ADE20K for semantic segmentation [90], and ARO [86] for attribute, relation, and ordering." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.577, + 0.483, + 0.789 + ], + "angle": 0, + "content": "Implementation Details. For pretraining, we initialize SAIL from the Mistral-7B-v0.1 base LLM and set the patch size to 14. We modify Megatron [64] to support SAIL's multimodal input. Pretraining uses 128 NVIDIA A100 80G GPUs with 2-way tensor parallelism and 64-way data parallelism. 
The learning rate is set at 5e-5 and decays cosinely to a minimum of 5e-6. For training efficiency, we concatenate sequences from different data samples into one long sequence of 32,768 tokens, adjusting the attention mask to ensure that tokens from different samples do not attend to each other. We use a round-robin approach to interleave image-text packed sequences and pure text packed sequences, configuring the global batch to contain approximately 16K image-text pairs."
  },
  {
    "type": "text",
    "bbox": [
      0.09,
      0.789,
      0.483,
      0.849
    ],
    "angle": 0,
    "content": "For Supervised Fine-Tuning (SFT), the global batch size is set to 512. Training is performed for one epoch with a maximum learning rate of 1e-5, following a linear warm-up phase and then transitioning to a cosine decay schedule."
  },
  {
    "type": "text",
    "bbox": [
      0.09,
      0.849,
      0.483,
      0.926
    ],
    "angle": 0,
    "content": "For vision, we load the checkpoint after Stage 1 pretraining and keep it frozen for downstream evaluations, including (1) image classification on ImageNet-1K [20], (2) semantic segmentation on ADE20K [90], and (3) attribute, relation, and ordering on the ARO benchmark [86]. Specif"
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.115,
      0.907,
      0.312
    ],
    "angle": 0,
    "content": "ically, (1) for image classification, we utilize an attention-based classifier [25] with 90 epochs of linear probing, where detailed configurations are mostly obtained from common practices [32, 73, 74]. Images are resized to \(224 \times 224\) and the global batch size is 8,192 across 8 A100 (80G) GPUs. (2) For semantic segmentation, we adopt ViT-Adapter [12] with UperNet [82] as the segmentation decoder. The implementation is based on MMSegmentation [17] with 80k training iterations. The input resolution is \(512 \times 512\) and the global batch size is 16 across 8 A100 (80G) GPUs. 
(3) For attribute, relation, and ordering, we regard the negative of the caption loss over each image-text pair as the similarity metric for retrieval."
  },
  {
    "type": "title",
    "bbox": [
      0.513,
      0.323,
      0.715,
      0.339
    ],
    "angle": 0,
    "content": "4.2. Experimental Results"
  },
  {
    "type": "title",
    "bbox": [
      0.513,
      0.346,
      0.797,
      0.361
    ],
    "angle": 0,
    "content": "4.2.1. Results on Vision Language Tasks"
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.365,
      0.907,
      0.699
    ],
    "angle": 0,
    "content": "As shown in Table 2, we compare SAIL against existing MLLMs across 13 vision-language benchmarks. SAIL consistently outperforms other Single Transformer-based models like Fuyu [5], EVE [21], SOLO [11], MonoInternVL [54], and EVE2 [22] across diverse vision-language tasks. This demonstrates that SAIL can achieve significant performance gains and push the boundaries of Single Transformer-based MLLMs without needing extra component designs or auxiliary training losses. Moreover, when compared to methods employing discrete vision tokens (e.g., Chameleon and Emu3), SAIL demonstrates superior performance. These results validate that scaling up single-transformer pretraining effectively enhances cross-modal alignment between images and text. Compared to the state-of-the-art modular MLLM LLaVA-OneVision [41], SAIL achieves comparable performance on some benchmarks, such as MMStar, SEEDBench-IMG, and RealWorldQA. While the performance of Single Transformer-based MLLMs still lags behind modular MLLMs in certain areas, we hypothesize that scaling the pretraining data volume or incorporating higher-quality instruction-tuning data will bridge the remaining performance gap."
  },
  {
    "type": "title",
    "bbox": [
      0.513,
      0.708,
      0.859,
      0.723
    ],
    "angle": 0,
    "content": "4.2.2. 
Results on Vision Representation Learning" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.727, + 0.907, + 0.789 + ], + "angle": 0, + "content": "In this section, we compare the quality of learned visual representations of our SAIL with other Single Transformer-based alternatives, including EVE [21], EVE2 [22], and SOLO [11]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.789, + 0.909, + 0.925 + ], + "angle": 0, + "content": "Classification and Segmentation. As demonstrated in Table 3, our method, SAIL, achieves a Top-1 accuracy of \\(84.95\\%\\) and a Top-5 accuracy of \\(97.59\\%\\) on the validation set of ImageNet-1K [20], significantly outperforming state-of-the-art alternatives [11, 21, 22]. In the segmentation task, SAIL also demonstrates superior performance with an mIoU of \\(55.30\\%\\), an mAcc of \\(67.24\\%\\), and an aAcc of \\(84.87\\%\\), illustrated in Table 3. These results indicate that SAIL is effective in both classification and segmentation tasks, of" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.112, + 0.904, + 0.344 + ], + "angle": 0, + "content": "
Method#Param#Data#VtokenGeneral VQAHallucinationMath&knowledgeOCR VQA
MMS*MMBenSEEDIMMVMMERWQAPOPEHalluSQA1MathVTQAAI2DOCRB
Modular MLLMs:
InternVL-1.5 [13]2.2B-/-332846.770.969.839.3190257.988.337.384.941.370.569.8654
QwenVL-Chat [3]7B7.2B / 50M25634.560.658.2-184849.3-36.868.235.361.545.9488
LLVA-1.5 [47]7B0.4B+ / 665K57633.164.364.330.5185954.885.927.666.825.546.154.8318
LLVA-1.6 [48]7B0.4B+ / 760K288037.667.464.743.9184257.886.427.670.232.564.966.6532
Cambrian-1 [69]8B10B+ / 7M57650.775.974.7--64.2-30.680.448.171.773.0-
LLVA-OneVision [41]7B10B+ / 3.2M729060.981.774.858.8199865.5--96.656.1-81.6-
Single Transformer-based MLLMs:
Fuyu [5]8B-/--34.410.759.321.4-43.78429.856.830.2-46.8366
Chameleon [67]7B1.4B+ / 1.8M102431.131.130.68.31703919.417.147.222.54.846.07.0
EVE [21]7B33M / 1.8M2304-52.364.625.71628-85.0-64.9-56.861.0398
SOLO [11]8B43.7M / 2M102435.867.764.430.4126044.778.640.473.332.925.061.4126
Mono-InternVL [54]3B1.3B / 7M6400-65.567.440.11875--45.793.645.772.668.6767
Emu3 [79]8B-/-16K46.658.568.237.2-57.485.231.789.231.364.770.0687
EVE2 [22]7B92M / 7.3M2500-66.371.445.0170962.487.6-96.2-71.174.8702
SAIL7B600M / 6M360053.170.172.946.3171963.985.854.293.357.077.176.7783
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.348, + 0.907, + 0.446 + ], + "angle": 0, + "content": "Table 2. Comparison with existing vision-language models on various vision-language benchmarks, including MMS*: MMStar [9]; MMBen:MMBench-EN [50]; SEED:SEEDBench-Img [40]; MMV:MMVet [85]; MME [46]; POPE [44]; Hallu: HallusionBench [30]; SQA: ScienceQA-Img [52]; TVQA: TextVQA [65]; MathV: MathVistaMINI [53]; AI2D [39]; RWQA: RealWorldQA [81]; OCRB:OCR-Bench [51]. Note that #A-Param denotes the number of activated parameters; #Data represents the pre-training / fine-tuning data volume; #Vtoken indicates the maximum image patch tokens. For MME, we report the sum of perception and cognition scores. The top two results are highlighted in bold and underline, respectively. All results are derived from those reported in other papers and the official reproduction results from the OpenCompass leaderboard [24]. Our results are obtained by VLMEvalKit [24]." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.455, + 0.482, + 0.56 + ], + "angle": 0, + "content": "
MethodClassificationSegmentation
Top-1Top-5mIoUmAccaAcc
EVE [21]42.0365.7727.1235.8972.91
EVE2 [22]44.8669.4140.8553.5379.31
SOLO [11]59.1080.8935.1144.8176.02
SAIL84.9597.5955.3067.2484.87
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.566, + 0.483, + 0.609 + ], + "angle": 0, + "content": "Table 3. Comparison on image classification and semantic segmentation with other encoder-free approaches. Our SAIL outperforms other alternatives by a large margin." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.625, + 0.475, + 0.726 + ], + "angle": 0, + "content": "
Method#Data#ParamImageNet-1KADE20K
OpenCLIP-H [15]2B0.6B84.4-
OpenCLIP-G [15]2B1.8B86.239.3†
ViT-22B [19]3B22B89.555.3
InternViT [14]6B6B88.258.7
SAIL0.5B7B85.055.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.73, + 0.483, + 0.8 + ], + "angle": 0, + "content": "Table 4. Comparison on image classification and semantic segmentation with other vision backbones. \\(\\dagger\\) indicates training with head tuning using UperNet [82], while others are based on ViT-Adapter [12]. SAIL, with significantly less training data, achieves competitive performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.817, + 0.483, + 0.893 + ], + "angle": 0, + "content": "fering substantial improvements over existing methods. In Table 4, even when comparing with other state-of-the-art vision backbones, our SAIL manages to achieve remarkable competitive performance with significantly less training data, demonstrating the scaling property of SAIL." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.894, + 0.484, + 0.925 + ], + "angle": 0, + "content": "Attribute, Relation, and Ordering. To systematically evaluate the ability of SAIL to understand different types of" + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.455, + 0.905, + 0.616 + ], + "angle": 0, + "content": "
MethodRelationAttributeOrder
COCOFlickr30K
OpenCLIP-H [15]49.964.632.640.4
OpenCLIP-G [15]49.965.633.038.3
CLIP-B/32 [62]59.262.948.157.9
CLIP-L/14 [62]61.261.746.856.8
InternViT [14]59.666.073.476.3
NegCLIP [86]81.071.086.091.0
CapPa [72]86.785.798.899.2
SAIL100.099.5100.0100.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.622, + 0.907, + 0.665 + ], + "angle": 0, + "content": "Table 5. Comparison on attribute, relation, and ordering (ARO) with other vision backbones. SAIL almost encodes compositional relationships between objects and attributes perfectly." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.678, + 0.905, + 0.754 + ], + "angle": 0, + "content": "relationships, attributes, and order information, we conduct experiments on the ARO benchmark [86]. As demonstrated in Table 5, SAIL encodes compositional relationships between objects and attributes almost perfectly, significantly surpassing other state-of-the-art vision backbones." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.754, + 0.905, + 0.8 + ], + "angle": 0, + "content": "For additional vision-related tasks, please refer to PixelSAIL [88] for SAIL's downstream capabilities in pixel-grounded understanding." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.808, + 0.803, + 0.824 + ], + "angle": 0, + "content": "4.3. Properties of Single Transformer" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.83, + 0.692, + 0.845 + ], + "angle": 0, + "content": "4.3.1. Scaling Properties." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.849, + 0.905, + 0.925 + ], + "angle": 0, + "content": "Model Scaling: We selected models of different sizes: SAIL-0.5B, SAIL-3B, and SAIL-7B (SAIL by default) for our experiments. Each model underwent Stage 1 pretraining on a mixed multimodal and pure text dataset, encountering 512M image-text pairs. 
Subsequently, they were fine" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.112, + 0.288, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.112, + 0.481, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.248, + 0.483, + 0.306 + ], + "angle": 0, + "content": "Figure 3. Model scaling of SAIL. Left: As the model size increases, the training language modeling loss gradually decreases. Right: As the model size increases, performance on downstream VLM tasks progressively improves." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.329, + 0.483, + 0.39 + ], + "angle": 0, + "content": "tuned on the LLaVA-mix-665K dataset using the any resolution (anyres) strategy. We evaluated the models based on their performance on vision and language benchmarks after supervised fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.392, + 0.483, + 0.573 + ], + "angle": 0, + "content": "The normalized performance of SAIL against model size is plotted in Figure 3. As the model size scales up, we observe a corresponding enhancement in performance. Additionally, as shown on the left side of Figure 3, the training language modeling loss decreases with increasing model size. This reduction in training loss indicates that larger models have a greater capacity to learn multimodal alignments effectively, enabling them to capture complex relationships between vision and language more accurately. The improved learning capacity directly translates to better performance on downstream VLM tasks, showcasing the benefits of scaling up the Single Transformer architecture." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.575, + 0.483, + 0.877 + ], + "angle": 0, + "content": "Data Scaling: we compared SAIL with its modular MLLM counterpart. 
For the modular MLLM, we used SigLIPSO [87] as the vision encoder, and the language model shared the same architecture and initialization parameters as SAIL. Both models were pre-trained using Pretraining stage-1 setting, with SAIL encountering 32M, 128M, and 512M image-text pairs during training, followed by fine-tuning on the LLaVA-mix-665K dataset. All parameters of both models are trainable. Both models employ an identical number of input tokens for images and text. The normalized performance of both models is plotted in Figure 1(A). The results show that in the low-data regime (32M), SAIL's performance lags behind the modular MLLM, likely due to SigLIP's prior training on 40B samples. However, as the data scales, SAIL exhibits a steeper performance curve, indicating more promising data scaling properties. At 512M image-text pairs, SAIL achieves performance comparable to the modular MLLM in our evaluation subset. This demonstrates the single transformer's superior data scalability, even without a pretrained vision encoder." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.88, + 0.484, + 0.926 + ], + "angle": 0, + "content": "Quantitative results on evaluated benchmark tasks of model scaling and data scaling are tabulated in the appendix." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.113, + 0.903, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.323, + 0.907, + 0.407 + ], + "angle": 0, + "content": "Figure 4. Image Attention Score Allocation: The figure shows the proportion of image attention scores across different transformer layers for Single Transformer-based MLLM and modular MLLM when predicting tokens. Single Transformer-based MLLM generally allocates higher attention weights to image tokens compared to modular MLLM." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.432, + 0.74, + 0.446 + ], + "angle": 0, + "content": "4.3.2. 
Information Flow Pattern" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.453, + 0.907, + 0.679 + ], + "angle": 0, + "content": "Different attention pattern compared to modular MLLM: since our comparative experiments show that the Single Transformer model exhibits more promising data scaling properties, we conducted an analysis of the trained SAIL model and its modular MLLM counterpart. Specifically, we followed the methodology from FastV [10] to analyze the attention score distribution for each predicted token given an image and a user query. This analysis focuses on how much attention is allocated to image tokens during token prediction. We selected 1000 samples from various datasets including VQAv2, GQA, TextVQA, DocVQA, MME, SEEDBench-IMG, MMBench, and some self-curated dialog examples. For each model prediction, we computed the average attention scores assigned to previous image tokens by the output token." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.681, + 0.906, + 0.772 + ], + "angle": 0, + "content": "We conducted a comparative experiment between Single Transformer-based MLLMs and modular MLLMs. The Single Transformer-based MLLMs included SAIL, SOLO [11], and EVE [21], while the modular MLLMs included Qwen2-VL [77], LLaVA-OneVision [41], and LLaVA1.5 [47]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.774, + 0.906, + 0.925 + ], + "angle": 0, + "content": "The results are depicted in Figure 4. Single Transformer-based MLLMs allocate between \\(60\\%\\) and \\(80\\%\\) of attention scores to image tokens across all layers when predicting tokens. In contrast, modular MLLMs such as Qwen2-VL and LLaVA-OneVision allocate only \\(10\\%\\) to \\(30\\%\\) of attention scores to image tokens across different layers. For LLaVA1.5, which does not update the ViT parameters during supervised fine-tuning (SFT), the image attention score is relatively high in the first two transformer layers but declines sharply in subsequent layers." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.959 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.112, + 0.482, + 0.201 + ], + "angle": 0, + "content": "
MethodMMBench [50]MME [46]
Physical RelationCelebrity RelationPositionPostersCelebrity
Modular MLLM30.450.598.3134.0100.3
SAIL52.288.9160.0108.275.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.205, + 0.483, + 0.276 + ], + "angle": 0, + "content": "Table 6. Performance Comparison of SAIL and Modular MLLM in MMBench and MME Tasks: the strengths of SAIL in spatial reasoning tasks (MMBench Physical Relation and MME Position split) and its weaknesses in world knowledge tasks (MMBench Celebrity Relation and MME Celebrity and Posters splits)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.304, + 0.483, + 0.397 + ], + "angle": 0, + "content": "From this experiment, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.399, + 0.484, + 0.476 + ], + "angle": 0, + "content": "These findings indicate that the Single Transformer model places more emphasis on grounding its predictions in the visual information. As the model undergoes data scaling, it allocates more effective computation to image tokens, thereby enhancing its capability as a vision-centric model." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.48, + 0.484, + 0.541 + ], + "angle": 0, + "content": "In summary, the attention pattern analysis underscores the Single Transformer's ability to robustly integrate visual context, enabling it to scale efficiently and potentially outperform modular MLLMs in vision-language tasks." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.56, + 0.387, + 0.575 + ], + "angle": 0, + "content": "4.3.3. 
Task-Specific Performance Analysis" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.584, + 0.483, + 0.614 + ], + "angle": 0, + "content": "We dissect SAIL's strengths and limitations through targeted case studies:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.618, + 0.483, + 0.725 + ], + "angle": 0, + "content": "Strengths: Spatial Reasoning. SAIL excels at tasks requiring precise spatial location. As shown in Table 6, under the setting of our data scaling experiment, it outperforms the modular counterpart by 61.7 points on the MME Position split and \\(21.8\\%\\) on MMBench Physical Relation questions. The unified architecture likely enables tighter coupling between visual geometry and linguistic descriptions." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.728, + 0.483, + 0.925 + ], + "angle": 0, + "content": "Weaknesses: World Knowledge. Conversely, SAIL falls short in tasks that demand extensive world knowledge. As shown in Table 6 SAIL underperforms in the MME celebrity and art splits compared to the modular MLLM. This underperformance can be attributed to SAIL's lack of diverse domain-specific data during pretraining, a gap that was not sufficiently addressed during supervised fine-tuning. Modular MLLMs, with their pretrained vision encoders like CLIP [15, 62] or SigLIP [87], have a broader knowledge base and therefore handle such tasks more effectively. We hypothesize that scaling up SAIL's pretraining data diversity could help bridge this gap, enhancing its performance on knowledge-intensive tasks." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.113, + 0.905, + 0.179 + ], + "angle": 0, + "content": "
Exp. SettingVQAv2GQASQATQASEED-I
Default59.146.959.620.135.1
#1 No Img full attn57.845.258.716.233.8
#2 No pure text in PT56.342.148.618.332.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.183, + 0.907, + 0.283 + ], + "angle": 0, + "content": "Table 7. Ablation Study on Basic Factors for SAIL: This table presents the impact of different ablation settings on the performance of SAIL across VQAv2 [28], GQA [36], SQA [52], TQA [65], and SEED-I [40]. The default setting includes image full attention and the inclusion of pure text data in pretraining. Ablation #1 removes image full attention, and ablation #2 excludes pure text in pretraining." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.305, + 0.866, + 0.322 + ], + "angle": 0, + "content": "4.4. Empirical Observations on Basic Factors" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.329, + 0.907, + 0.42 + ], + "angle": 0, + "content": "To guide scalable training of single-transformer MLLMs, we conduct ablation studies on two critical design choices using SAIL-0.5B pretrained on 128M image-text pairs and fine-tuned on LLaVA-mix-665K. Performance is evaluated through zero-shot image classification after pretraining [71] and vision-language benchmarks after SFT." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.422, + 0.909, + 0.649 + ], + "angle": 0, + "content": "Bidirectional Attention for Image Patches with Multimodal Position Encoding. We compare two approaches for integrating image patches into the transformer: (1) Causal attention with 1D positional encoding, using a token to demarcate image rows. (2) Full bidirectional attention for image patches paired with multimodal rotary position embeddings (RoPE), which jointly encode spatial coordinates (e.g., 2D grid positions) and text token positions. As shown in Table 7, configuration of using bidirectional attention with multimodal RoPE significantly improves performance on vision-language tasks, with a particularly notable gain of \\(3.1\\%\\) on TextVQA. 
This suggests that enabling cross-patch interactions during pretraining enhances visual representation learning and tightens cross-modal alignment." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.65, + 0.909, + 0.832 + ], + "angle": 0, + "content": "Interleaving Pure Text Data During Pretraining. We analyze the impact of mixing SlimPajama text data with image-text pairs during pretraining. The results, as presented in Table 7 #2, reveal that mixing in pure text data consistently improves performance across vision and language benchmarks. This finding underscores the importance of preserving language capabilities in the LLM when training Single Transformer models, as maintaining strong language skills is crucial for building a multimodal model capable of complex reasoning. Currently, incorporating text data in training is one of the effective methods to maintain the language abilities of the model." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.833, + 0.909, + 0.925 + ], + "angle": 0, + "content": "In conclusion, our ablation studies identify key design choices for training SAIL effectively. Using bi-directional attention with multimodal rotary position embeddings enhances visual perception, while incorporating pure text data preserves essential language capabilities for robust multimodal performance." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.113, + 0.21, + 0.129 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.139, + 0.486, + 0.381 + ], + "angle": 0, + "content": "In this work, we conducted an extensive analysis of Single Transformer-based MLLMs compared to modular MLLMs. Our investigation explored the unique properties of Single Transformers, including scalability, cross-modal information flow patterns, and visual representation capabilities. 
A series of experiments on our trained SAIL model demonstrated that this unified architecture achieves performance on vision-language benchmarks comparable to modular MLLMs while also functioning effectively as a vision backbone. Our findings highlight several advantages of Single Transformer architectures, such as superior data scalability, vision-centric information flow, and inherent capabilities as a powerful vision encoder. We hope our empirical findings will inspire further research to refine and enhance Single Transformer architectures, advancing the field of multimodal intelligence." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.396, + 0.18, + 0.414 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.421, + 0.483, + 0.451 + ], + "angle": 0, + "content": "In the appendix, we provide additional experimental details and results." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.461, + 0.345, + 0.478 + ], + "angle": 0, + "content": "Additional Experimental Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.484, + 0.483, + 0.559 + ], + "angle": 0, + "content": "Training Configurations. In this section, we provide the corresponding setups for our experiment series in the main paper, including the default setting, the data scaling series, the model scaling series, and ablation experiment settings. The detailed configurations are shown in Table 8." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.56, + 0.483, + 0.695 + ], + "angle": 0, + "content": "Evaluation Configurations. In the main paper, we measure the model performance on several benchmarks: VQAv2 [28], GQA [36], ScienceQA-IMG [52], TextVQA [65], POPE [44], MME [46], MMBench [50], and SEEDBench-IMG [40]. We normalized the performance to a full score of 100 and averaged the performance across these benchmarks to plot the curves shown in Figure 1(A) and Figure 3. The detailed experimental results are shown in Table 9." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.706, + 0.348, + 0.722 + ], + "angle": 0, + "content": "Additional Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.728, + 0.483, + 0.877 + ], + "angle": 0, + "content": "A comparison of SAIL and LLaVA1.5. In this section, we conduct an experiment to compare SAIL with LLaVA1.5 [47]. In this experiment, our SAIL is trained on 512M image-text pairs in Pretraining Stage 1, followed by fine-tuning on the LLaVA-mix-665K dataset. To fairly compare the performance of the two models, we do not use the anyres strategy during SFT. Instead, we adopt the same image processing approach as LLaVA1.5, ensuring that the aspect ratio and number of image tokens are consistent across both models." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.88, + 0.484, + 0.926 + ], + "angle": 0, + "content": "The experimental results are presented in Table 10. Despite our model being trained on only 512M image-text pairs, which is significantly smaller than the CLIP pretrain-" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.115, + 0.905, + 0.19 + ], + "angle": 0, + "content": "ing data used in the LLaVA1.5 model, the results show that our model achieves comparable performance to LLaVA1.5 across various benchmarks. Remarkably, our model even outperforms LLaVA1.5 on specific benchmarks such as DocVQA and ChartVQA." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.192, + 0.907, + 0.298 + ], + "angle": 0, + "content": "These findings highlight the strong potential of Single Transformer models in terms of data scaling. Specifically, they suggest that even with a relatively smaller pretraining dataset, Single Transformer models can perform on par with, or even exceed, more extensively trained modular MLLMs like LLaVA1.5 when similar preprocessing and controlled variables are applied." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.299, + 0.907, + 0.405 + ], + "angle": 0, + "content": "Compare SAIL and LLaVA on MMVP. We compare SAIL and LLaVA1.5 [47] on MMVP [70] to dissect the behavior of the two models. The results are shown in Figure 5. From examples (A) and (B), we observe that SAIL performs better in perceiving minor regions and objects. Examples (C) and (D) illustrate that SAIL can more accurately distinguish the states of objects." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.406, + 0.906, + 0.542 + ], + "angle": 0, + "content": "Additional Experiments on Information Flow Pattern Analysis. In the main paper, we analyzed the distribution patterns of image attention scores for different Single Transformer-based MLLMs and modular MLLMs. The results showed that Single Transformer-based MLLMs allocate more attention weights to image tokens. However, this could be due to different models processing varying numbers of image tokens, where more image tokens lead to higher aggregated attention scores." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.542, + 0.906, + 0.648 + ], + "angle": 0, + "content": "To analyze this in a more controlled manner, we designed an additional experiment. Using the data scaling setup at 512M, we pretrained SAIL and its modular MLLM counterpart. After pretraining, we fine-tuned both models using the LLaVA-mix-665K dataset, fixing the resolution size to 224x224 during SFT, instead of using any resolution." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.649, + 0.906, + 0.725 + ], + "angle": 0, + "content": "The results, shown in Figure 6, reveal that SAIL allocates higher attention scores to image tokens across all transformer layers compared to the modular MLLM, particularly in medium layers \\((+43.5\\%)\\) in layer 14) and deep layers \\((+41.2\\%)\\) in layer 31)." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.726, + 0.906, + 0.817 + ], + "angle": 0, + "content": "From this, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.818, + 0.906, + 0.909 + ], + "angle": 0, + "content": "Attention Map Visualization. In the main paper, we found that Single Transformer-based MLLMs allocate a large portion of attention weights to image tokens during inference, indicating a more vision-centric model. Here, we visualize the attention distribution of SAIL across different regions of the image when predicting tokens." + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.909, + 0.905, + 0.925 + ], + "angle": 0, + "content": "The results in Figure 7 illustrate the attention maps for" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.959 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.112, + 0.905, + 0.219 + ], + "angle": 0, + "content": "
ExpModelLLMStage 1Stage 2SFT
DataLRDataLRDataLR
Figure 1(A)SAIL, point 32MMistral-7B-v0.1Standard Stage 1 Data (32M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(A)SAIL, point 128MMistral-7B-v0.1Standard Stage 1 Data (128M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(A), Table 6SAIL, point512MMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(B), Table 2SAILMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)Standard Stage 2 Data (1e-5, 5e-6)Standard SFT Data(1e-5,0)
Table 3, 4, 5SAILMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)----
Figure 3, Table 7SAIL-0.5BQwen2.5-0.5BStandard Stage 1 Data (128M image-text pairs) (5e-4, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 3SAIL-3BQwen2.5-3BStandard Stage 1 Data (128M image-text pairs) (1e-4, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 3SAIL-7BMistral-7B-v0.1Standard Stage 1 Data (128M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.227, + 0.907, + 0.281 + ], + "angle": 0, + "content": "Table 8. Experimental Configurations for Various Settings. The table lists the models used, the specific LLM variants, the datasets, and learning rates (LR) applied during each training stage (Pretraining Stage 1, Pretraining Stage 2, and SFT). \"Standard Stage 1 Data\", \"Standard Stage 2 Data\" and \"Standard SFT Data\" are listed in Table 1. Specific points and tables/figures referred to in the text are also indicated." + }, + { + "type": "table", + "bbox": [ + 0.156, + 0.297, + 0.845, + 0.42 + ], + "angle": 0, + "content": "
ModelVQAv2GQASciQA-IMGTextVQAPOPEMMEMMBenchSEEDBench-IMGNorm(avg
Figure 1, modular MLLM, 32M76.9658.768.4858.6888.17159969.4470.3161.41
Figure 1, modular MLLM, 128M78.4759.7870.0559.8286.78163868.5768.1161.52
Figure 1, modular MLLM, 512M80.0662.3870.3457.8583.14137970.8269.8361.86
Figure 1, SAIL, 32M70.5157.9563.3231.6781.77142148.2261.5151.93
Figure 1, SAIL, 128M76.3660.9362.6156.8685.5145853.9466.6057.91
Figure 1, SAIL, 512M78.5162.0667.4863.9486.04153056.7168.8360.51
Figure 3, SAIL-3B67.353.263.830.966.9820.844.655.447.80
Figure 3, SAIL-0.5B59.146.959.620.159.8761.4538.535.139.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.33, + 0.43, + 0.668, + 0.444 + ], + "angle": 0, + "content": "Table 9. Detailed experimental results in the main paper." + }, + { + "type": "table", + "bbox": [ + 0.155, + 0.458, + 0.844, + 0.501 + ], + "angle": 0, + "content": "
MethodPretrainSFTVQAv2GQASciQA-IMGTextVQAPOPEMMBenchSEEDbenchDocVQAChartQAAI2DMMStaravg
LLaVA-1.5-336px [47]12.8B+558K665K78.562.066.858.285.964.366.128.118.254.832.458.3
SAIL512M665K77.861.668.056.486.661.369.829.321.558.737.159.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.51, + 0.907, + 0.54 + ], + "angle": 0, + "content": "Table 10. Comparison of SAIL and LLaVA1.5. We evaluate the models on VQAv2 [28], GQA [36], ScienceQA [52], TextVQA [65], POPE [44], MMBench [50], SEEDBench [40], DocVQA [56], ChartQA [55], AI2D [39] and MMStar [9]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.565, + 0.483, + 0.701 + ], + "angle": 0, + "content": "specific tokens to the image portion when SAIL generates predictions for multimodal queries. The visualizations show that in the early transformer layers, the predicted tokens primarily focus on the salient regions of the image. As the model progresses to deeper layers, the attention shifts to areas more relevant to the predicted tokens. This behavior demonstrates that SAIL has the potential to function as a grounding model, effectively correlating text tokens with their corresponding image regions." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.702, + 0.483, + 0.792 + ], + "angle": 0, + "content": "In other words, during inference, the model incrementally concentrates attention weights on relevant regions, aiding in decision-making. This progressive focusing of attention signifies the model's capability to ground text tokens in the corresponding visual context, enhancing its performance in vision-language tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.792, + 0.483, + 0.882 + ], + "angle": 0, + "content": "Visual Understanding Demonstration. We investigate several vision perception and reasoning capabilities of our SAIL. These include its ability to understand rich OCR information (Table 11), interpret real-world scenes (Table 12), comprehend scientific charts (Table 13), and analyze poster contents (Table 14)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.184, + 0.162, + 0.307, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.163, + 0.437, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.183, + 0.264, + 0.447, + 0.278 + ], + "angle": 0, + "content": "(A) Are there patterns on the easter eggs?" + }, + { + "type": "image_caption", + "bbox": [ + 0.278, + 0.278, + 0.352, + 0.288 + ], + "angle": 0, + "content": "GT: Yes; No" + }, + { + "type": "image_caption", + "bbox": [ + 0.274, + 0.29, + 0.356, + 0.301 + ], + "angle": 0, + "content": "SAIL: Yes; No" + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.302, + 0.372, + 0.313 + ], + "angle": 0, + "content": "LLaVA1.5: Yes; Yes" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.328, + 0.31, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.328, + 0.443, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.159, + 0.431, + 0.458, + 0.445 + ], + "angle": 0, + "content": "(C) Are the birds flapping upward or downward?" 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.445, + 0.378, + 0.456 + ], + "angle": 0, + "content": "GT: Upward; Downward" + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.457, + 0.384, + 0.468 + ], + "angle": 0, + "content": "SAIL: Upward; Downward" + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.47, + 0.39, + 0.481 + ], + "angle": 0, + "content": "LLaVA1.5: Upward; Upward" + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.163, + 0.657, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.163, + 0.788, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.472, + 0.264, + 0.842, + 0.278 + ], + "angle": 0, + "content": "(B) Are there any words displayed on the vehicle's lightbar?" + }, + { + "type": "image_caption", + "bbox": [ + 0.621, + 0.278, + 0.693, + 0.289 + ], + "angle": 0, + "content": "GT: Yes; No" + }, + { + "type": "image_caption", + "bbox": [ + 0.616, + 0.29, + 0.698, + 0.301 + ], + "angle": 0, + "content": "SAIL: Yes; No" + }, + { + "type": "image_caption", + "bbox": [ + 0.6, + 0.302, + 0.714, + 0.313 + ], + "angle": 0, + "content": "LLaVA1.5: Yes; Yes" + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.326, + 0.659, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.326, + 0.788, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.523, + 0.431, + 0.805, + 0.445 + ], + "angle": 0, + "content": "(D) Is the elephant's trunk raised or lowered?" 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.603, + 0.445, + 0.726, + 0.456 + ], + "angle": 0, + "content": "GT: Raised; Lowered" + }, + { + "type": "image_caption", + "bbox": [ + 0.597, + 0.457, + 0.731, + 0.468 + ], + "angle": 0, + "content": "SAIL: Raised; Lowered" + }, + { + "type": "image_caption", + "bbox": [ + 0.579, + 0.469, + 0.75, + 0.48 + ], + "angle": 0, + "content": "LLaVA1.5: Lowered; Lowered" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.496, + 0.907, + 0.525 + ], + "angle": 0, + "content": "Figure 5. Comparison of SAIL and LLaVA1.5 on MMVP examples. SAIL demonstrates better performance in perceiving minor regions and objects, as well as more accurately distinguishing object states." + }, + { + "type": "image", + "bbox": [ + 0.256, + 0.603, + 0.744, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.838, + 0.907, + 0.88 + ], + "angle": 0, + "content": "Figure 6. Image attention score allocation for SAIL and its modular MLLM counterpart. We compared the attention score allocation distribution for shallow layers, medium layers, and deep layers between these two models. The Single Transformer-based MLLM model significantly allocates a higher proportion of attention score to image tokens during prediction than the modular MLLM." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.327, + 0.279, + 0.665, + 0.294 + ], + "angle": 0, + "content": "Query: When was the travel agency founded?" 
+ }, + { + "type": "image", + "bbox": [ + 0.225, + 0.3, + 0.495, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.279, + 0.438, + 0.444, + 0.451 + ], + "angle": 0, + "content": "Output token: \"2\", Layer 2" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.3, + 0.774, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.556, + 0.438, + 0.729, + 0.451 + ], + "angle": 0, + "content": "Output token: “2”, Layer 25" + }, + { + "type": "title", + "bbox": [ + 0.373, + 0.483, + 0.619, + 0.497 + ], + "angle": 0, + "content": "Query: What color is the Bicycle?" + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.5, + 0.487, + 0.699 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.284, + 0.701, + 0.461, + 0.714 + ], + "angle": 0, + "content": "Output token: \"red\", Layer 0" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.5, + 0.748, + 0.699 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.542, + 0.701, + 0.725, + 0.714 + ], + "angle": 0, + "content": "Output token: \"red\", Layer 31" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.735, + 0.907, + 0.777 + ], + "angle": 0, + "content": "Figure 7. Visualization of SAIL's attention distribution across image regions during token prediction. In early transformer layers, attention primarily focuses on the salient regions of the image. As the model progresses to deeper layers, attention shifts to areas more relevant to the predicted tokens." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.124, + 0.308, + 0.509, + 0.323 + ], + "angle": 0, + "content": "Example 1: Understanding OCR Information in Webpage." 
+ }, + { + "type": "image", + "bbox": [ + 0.179, + 0.335, + 0.777, + 0.509 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.512, + 0.378, + 0.524 + ], + "angle": 0, + "content": "User Please output all the text information." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.528, + 0.851, + 0.687 + ], + "angle": 0, + "content": "SAIL Cornell University arXiv cs arXiv:2304.08485 Computer Science Computer Vision and Pattern Recognition Visual Instruction Tuning HaoTian Liu, Chunyuan Li, Qingyang Wu, Yong Jae Lee Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLM for general-purpose visual and language visual and language this http URL URL Early Experiments: LLMs demonstrate impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLAVA and GPT-4 achieves a new state-of-the-art accuracy of \\(92.53\\%\\). We make GPT-4 generated visual instruction tuning data, our model and code base publicly available. 
Comments: NeurIPS 2023 Oral; project page: this https URL Subjects: Computer Vision and Pattern Recognition (cs.CV); Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Machine Learning (cs.LG) Cite as: arXiv:2304.08485 [cs.CV] (or arXiv:2304.08485v2 [cs.CV] for this version) https://doi.org/10.48550/arXiv.2304.08485 Submission history From: Haotian Liu [view email] [v1] Mon, 17 Apr 2023 17:59:25 UTC (4,360 KB) [v2] Mon, 11 Dec 2023 17:46:14 UTC (4,985 KB)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.718, + 0.773, + 0.732 + ], + "angle": 0, + "content": "Table 11. SAIL demonstrates a strong capability to extract information in OCR-rich scenarios." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.125, + 0.246, + 0.427, + 0.261 + ], + "angle": 0, + "content": "Example 2: Understanding Real-World Scene." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.272, + 0.776, + 0.57 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.14, + 0.573, + 0.365, + 0.585 + ], + "angle": 0, + "content": "User Please describe the image in detail." + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.59, + 0.434, + 0.602 + ], + "angle": 0, + "content": "SAIL There are several notable elements in the image:" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.602, + 0.851, + 0.747 + ], + "angle": 0, + "content": "1. The highway is multi-lane, with multiple lanes visible. 2. The traffic is dense, with vehicles in various lanes. 3. The weather seems to be overcast, as indicated by the grayish tones of the image. 4. There are several traffic signs and signs visible, including a green highway sign indicating \"1/2 mile\" to the right, and a green sign with the number \"16\" and the word \"MILE\" on the left side of the image. 5. The background includes a hill or mountain, with trees and greenery visible. 6. 
There are also several power lines and electrical pylons visible along the sides of the highway. The image's focus is on the traffic and the surrounding environment. The vehicles are primarily in the center and right lanes of the highway, with some vehicles in the left lanes as well. The highway is flanked by lush greenery, including trees and grassy areas, suggesting that this is a rural or possibly a suburban area. The traffic appears to be moving at a moderate pace. The weather conditions suggest that this is a daytime scene, possibly during the morning or late afternoon. The image is taken from a perspective that suggests the viewer is in a vehicle on the highway, observing the traffic ahead. In summary, the image captures a typical highway scene with moderate traffic, surrounded by a mix of natural and built environments, under an overcast sky. The road is in good condition, and the vehicles are traveling smoothly in both directions." + }, + { + "type": "table_caption", + "bbox": [ + 0.226, + 0.78, + 0.763, + 0.794 + ], + "angle": 0, + "content": "Table 12. SAIL demonstrates a strong capability about understanding the real-world scene." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.123, + 0.234, + 0.444, + 0.251 + ], + "angle": 0, + "content": "Example 3: Understanding the Scientific Charts." 
+ }, + { + "type": "image", + "bbox": [ + 0.18, + 0.261, + 0.476, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.393, + 0.369, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.393, + 0.475, + 0.456 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.457, + 0.345, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.347, + 0.457, + 0.476, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.478, + 0.261, + 0.775, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.478, + 0.36, + 0.626, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.36, + 0.772, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.478, + 0.446, + 0.772, + 0.514 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.478, + 0.514, + 0.774, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.608, + 0.367, + 0.621 + ], + "angle": 0, + "content": "User Please describe the image in detail." + }, + { + "type": "image_footnote", + "bbox": [ + 0.14, + 0.625, + 0.851, + 0.76 + ], + "angle": 0, + "content": "This image is a collage of various objects, foods, and animals, all of which are predominantly blue or have blue elements. The image is divided into sections labeled with letters from A to O. Here is a detailed description of each section: 1. A - Pumpkin: A blue pumpkin is depicted, which is a common symbol associated with autumn and Halloween. 2. B - Fish: A blue fish is shown, which could be a representation of a common aquatic animal. 3. C - M&M: A blue M&M candy is present.4. D - Milk and Soap: A glass of milk, a bar of soap, and a blue sponge.5. 
E - Bread and Jelly Beans: Blue wheat, a blue loaf of bread, and blue jelly beans.6. F - Castor Oil: A bottle of castor oil.7. G - Cheerios and Snacks: A box of Cheerios, a glass of a dark blue drink, a blue popsicle, a blue donut, and a blue muffin. 8. H - Vegetables: A collection of vegetables including a blue cucumber, green leafy vegetables, and a bunch of blue carrots.9. I - Apple and Stones: A green apple, a blue candy, and blue stones.10. J - Syrup and Candy: Sugar cubes, a bottle of blue syrup, and a blue candy bar.11. K - Coconut and Aloe: A blue coconut, a bottle of blue liquid, and a blue aloe vera plant.12. L - Pineapple and Juice: A blue pineapple, a blue banana, and a glass of blue juice.13. M - Candy Bar: A blue candy bar.14. N - Blueberries: Blueberries.15. O - Oval Object: A blue oval-shaped object." + }, + { + "type": "table_caption", + "bbox": [ + 0.228, + 0.791, + 0.76, + 0.805 + ], + "angle": 0, + "content": "Table 13. SAIL demonstrates a strong capability about understanding the scientific charts." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.124, + 0.26, + 0.406, + 0.276 + ], + "angle": 0, + "content": "Example 4: Understanding Poster Content." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.284, + 0.592, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.535, + 0.365, + 0.546 + ], + "angle": 0, + "content": "User Please describe the image in detail." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.551, + 0.851, + 0.721 + ], + "angle": 0, + "content": "SAIL At the top of the image, there is a decorative border with a pattern of hanging golden ornaments, which adds a festive and celebratory feel to the overall design. The background is plain white, which helps the colorful elements stand out. 
In the center of the image, the text \"Happy Sankranti\" is prominently displayed in a large, elegant, and stylized font. The text is in a warm orange color, which is traditionally associated with the festival. Below the text, there is a depiction of a woman dressed in traditional Indian attire, likely a saree, in shades of blue and green. She is engaged in the act of preparing a traditional Sankranti sweet, which is a common practice during the festival. The sweet is shaped like a pyramid and is decorated with intricate patterns, reflecting the artistic and cultural heritage of the region. To the left of the woman, there is a three-tiered pot, known as a \"patala\", which is a traditional Indian cooking vessel. The pot is adorned with a colorful design and is placed on a colorful circular mat, which is also a traditional element in Indian households. On the right side of the woman, there is a small hut with a thatched roof, which is a common architectural style in rural Indian villages. The hut is depicted in a simple and rustic manner, emphasizing the rural and traditional aspect of the scene. Overall, the image captures the essence of the Sankranti festival, highlighting the cultural and religious aspects of the celebration. The use of traditional clothing, the preparation of traditional sweets, and the depiction of a rural village scene all contribute to a sense of cultural authenticity and celebration." + }, + { + "type": "text", + "bbox": [ + 0.234, + 0.765, + 0.754, + 0.779 + ], + "angle": 0, + "content": "Table 14. SAIL demonstrates a strong capability about understanding the poster content." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.114, + 0.188, + 0.129 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.139, + 0.482, + 0.22 + ], + "angle": 0, + "content": "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.223, + 0.482, + 0.249 + ], + "angle": 0, + "content": "[2] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.251, + 0.483, + 0.319 + ], + "angle": 0, + "content": "[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023.1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.322, + 0.482, + 0.361 + ], + "angle": 0, + "content": "[4] Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.364, + 0.482, + 0.405 + ], + "angle": 0, + "content": "[5] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Sagnak Tasirlar. Introducing our multimodal models, 2023. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.407, + 0.482, + 0.475 + ], + "angle": 0, + "content": "[6] Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. 
Conceptual 12m: Pushing web-scale image-text pretraining to recognize long-tail visual concepts. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3558-3568, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.477, + 0.482, + 0.558 + ], + "angle": 0, + "content": "[7] Jun Chen, Deyao Zhu, Xiaogian Shen, Xiang Li, Zechun Liu, Pengchuan Zhang, Raghuraman Krishnamoorthi, Vikas Chandra, Yunyang Xiong, and Mohamed Elhoseiny. Minigpt-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.561, + 0.482, + 0.629 + ], + "angle": 0, + "content": "[8] Jieneng Chen, Qihang Yu, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Vitamin: Designing scalable vision models in the vision-language era. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.631, + 0.482, + 0.698 + ], + "angle": 0, + "content": "[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 5, 6, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.701, + 0.482, + 0.769 + ], + "angle": 0, + "content": "[10] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. In European Conference on Computer Vision, pages 19-35. Springer, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.771, + 0.482, + 0.825 + ], + "angle": 0, + "content": "[11] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. 
Transactions on Machine Learning Research, 2024. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.828, + 0.482, + 0.88 + ], + "angle": 0, + "content": "[12] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.884, + 0.482, + 0.924 + ], + "angle": 0, + "content": "[13] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.139, + 0.483, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.548, + 0.117, + 0.905, + 0.156 + ], + "angle": 0, + "content": "the gap to commercial multimodal models with open-source suites. Science China Information Sciences, 67(12):220101, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.159, + 0.905, + 0.241 + ], + "angle": 0, + "content": "[14] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.243, + 0.905, + 0.337 + ], + "angle": 0, + "content": "[15] Mehdi Cherti, Romain Beaumont, Ross Wightman, Mitchell Wortsman, Gabriel Ilharco, Cade Gordon, Christoph Schuhmann, Ludwig Schmidt, and Jenia Jitsev. Reproducible scaling laws for contrastive language-image learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2818-2829, 2023. 
1, 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.34, + 0.905, + 0.408 + ], + "angle": 0, + "content": "[16] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhang-hao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yong-hao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. Vicuna: An open-source chatbot impressing gpt-4 with \\(90\\%\\) * chatgpt quality, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.41, + 0.905, + 0.464 + ], + "angle": 0, + "content": "[17] MMSegmentation Contributors. MMSegmentation: Openmmlab semantic segmentation toolbox and benchmark. https://github.com/open-mmlab/mmsegmentation, 2020.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.466, + 0.905, + 0.546 + ], + "angle": 0, + "content": "[18] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.549, + 0.905, + 0.631 + ], + "angle": 0, + "content": "[19] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.633, + 0.905, + 0.687 + ], + "angle": 0, + "content": "[20] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 
3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.689, + 0.905, + 0.756 + ], + "angle": 0, + "content": "[21] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. In Advances in Neural Information Processing Systems, pages 52545-52567. Curran Associates, Inc., 2024. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.758, + 0.905, + 0.826 + ], + "angle": 0, + "content": "[22] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Evev2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.829, + 0.905, + 0.923 + ], + "angle": 0, + "content": "[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.117, + 0.905, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.959 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.199 + ], + "angle": 0, + "content": "[24] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.201, + 0.484, + 0.27 + ], + "angle": 0, + "content": "[25] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pretraining of large autoregressive image models. arXiv preprint arXiv:2401.08541, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.272, + 0.484, + 0.354 + ], + "angle": 0, + "content": "[26] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. Eva: Exploring the limits of masked visual representation learning at scale. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 19358-19369, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.357, + 0.484, + 0.44 + ], + "angle": 0, + "content": "[27] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. Datacomp: In search of the next generation of multimodal datasets. Advances in Neural Information Processing Systems, 36:27092-27112, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.442, + 0.484, + 0.524 + ], + "angle": 0, + "content": "[28] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 8, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.527, + 0.484, + 0.596 + ], + "angle": 0, + "content": "[29] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 
4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.599, + 0.484, + 0.695 + ], + "angle": 0, + "content": "[30] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.697, + 0.484, + 0.765 + ], + "angle": 0, + "content": "[31] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.768, + 0.484, + 0.836 + ], + "angle": 0, + "content": "[32] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.839, + 0.484, + 0.895 + ], + "angle": 0, + "content": "[33] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. arXiv preprint arXiv:2312.08914, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.897, + 0.484, + 0.924 + ], + "angle": 0, + "content": "[34] Xinyu Huang, Youcai Zhang, Jinyu Ma, Weiwei Tian, Rui Feng, Yuejie Zhang, Yaqian Li, Yandong Guo, and Lei" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.116, + 0.905, + 0.143 + ], + "angle": 0, + "content": "Zhang. Tag2text: Guiding vision-language model via image tagging. arXiv preprint arXiv:2303.05657, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.145, + 0.905, + 0.185 + ], + "angle": 0, + "content": "[35] Zilong Huang, Qinghao Ye, Bingyi Kang, Jiashi Feng, and Haoqi Fan. Classification done right for vision-language pretraining. In NeurIPS, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.187, + 0.905, + 0.254 + ], + "angle": 0, + "content": "[36] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019. 8, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.256, + 0.905, + 0.337 + ], + "angle": 0, + "content": "[37] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.34, + 0.905, + 0.407 + ], + "angle": 0, + "content": "[38] Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.409, + 0.905, + 0.45 + ], + "angle": 0, + "content": "[39] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016. 5, 6, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.451, + 0.905, + 0.505 + ], + "angle": 0, + "content": "[40] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 5, 6, 8, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.507, + 0.905, + 0.561 + ], + "angle": 0, + "content": "[41] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.563, + 0.905, + 0.589 + ], + "angle": 0, + "content": "[42] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training. In NeurIPS, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.591, + 0.905, + 0.659 + ], + "angle": 0, + "content": "[43] Xianhang Li, Haoqin Tu, Mude Hui, Zeyu Wang, Bingchen Zhao, Junfei Xiao, Sucheng Ren, Jieru Mei, Qing Liu, Huangjie Zheng, Yuyin Zhou, and Cihang Xie. What if we recapture billions of web images with llama-3? arXiv preprint arXiv:2406.08478, 2024. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.661, + 0.905, + 0.714 + ], + "angle": 0, + "content": "[44] Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 
6, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.716, + 0.905, + 0.783 + ], + "angle": 0, + "content": "[45] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23390-23400, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.786, + 0.905, + 0.854 + ], + "angle": 0, + "content": "[46] Zijing Liang, Yanjie Xu, Yifan Hong, Penghui Shang, Qi Wang, Qiang Fu, and Ke Liu. A survey of multimodel large language models. In Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, pages 405-409, 2024. 5, 6, 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.856, + 0.905, + 0.922 + ], + "angle": 0, + "content": "[47] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 6, 7, 9, 10" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.116, + 0.905, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.116, + 0.482, + 0.158 + ], + "angle": 0, + "content": "[48] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava next: Improved reasoning,OCR, and world knowledge, 2024.3,6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.16, + 0.482, + 0.2 + ], + "angle": 0, + "content": "[49] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 
1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.202, + 0.482, + 0.272 + ], + "angle": 0, + "content": "[50] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2024. 5, 6, 8, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.273, + 0.482, + 0.342 + ], + "angle": 0, + "content": "[51] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.343, + 0.482, + 0.425 + ], + "angle": 0, + "content": "[52] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 5, 6, 8, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.428, + 0.482, + 0.496 + ], + "angle": 0, + "content": "[53] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.498, + 0.482, + 0.568 + ], + "angle": 0, + "content": "[54] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jiawen Liu, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the boundaries of monolithic multimodal large language models with endogenous visual pre-training. arXiv preprint arXiv:2410.08202, 2024. 
1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.569, + 0.482, + 0.624 + ], + "angle": 0, + "content": "[55] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.626, + 0.483, + 0.68 + ], + "angle": 0, + "content": "[56] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.682, + 0.482, + 0.764 + ], + "angle": 0, + "content": "[57] Sachin Mehta, Maxwell Horton, Fartash Faghri, Mohammad Hossein Sekhavat, Mahyar Najibi, Mehrdad Farajtabar, Oncel Tuzel, and Mohammad Rastegari. Catlip: Clipsevel visual recognition accuracy with 2.7 x faster pretraining on web-scale image-text data. arXiv preprint arXiv:2404.15653, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.767, + 0.482, + 0.793 + ], + "angle": 0, + "content": "[58] Meta. Introducing meta llama 3: The most capable openly available llm to date, 2024. Accessed: 2024-04-18. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.796, + 0.428, + 0.81 + ], + "angle": 0, + "content": "[59] OpenAI. Introducing chatgpt. OpenAI Blog, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.811, + 0.368, + 0.824 + ], + "angle": 0, + "content": "[60] OpenAI. Gpt-4 technical report, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.826, + 0.406, + 0.84 + ], + "angle": 0, + "content": "[61] OpenAI. Gpt-4v(ision) system card, 2023. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.842, + 0.482, + 0.923 + ], + "angle": 0, + "content": "[62] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 1, 2, 3, 6, 8" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.116, + 0.483, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.116, + 0.905, + 0.197 + ], + "angle": 0, + "content": "[63] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. LAION-5B: An open large-scale dataset for training next generation image-text models. In NeurlPS, 2022. 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.2, + 0.905, + 0.267 + ], + "angle": 0, + "content": "[64] Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatronlm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.269, + 0.905, + 0.338 + ], + "angle": 0, + "content": "[65] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 5, 6, 8, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.34, + 0.905, + 0.422 + ], + "angle": 0, + "content": "[66] Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama. 
https://cerebras.ai/blog/slimpajama-a-627b-token-cleaned-and-deduplicated-version-of-redpajama, 2023.4.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.423, + 0.905, + 0.463 + ], + "angle": 0, + "content": "[67] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.465, + 0.905, + 0.533 + ], + "angle": 0, + "content": "[68] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.535, + 0.905, + 0.617 + ], + "angle": 0, + "content": "[69] Peter Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Adithya Jairam Vedagiri IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. Advances in Neural Information Processing Systems, 37:87310-87356, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.619, + 0.905, + 0.687 + ], + "angle": 0, + "content": "[70] Shengbang Tong, Zhuang Liu, Yuexiang Zhai, Yi Ma, Yann LeCun, and Saining Xie. Eyes wide shut? exploring the visual shortcomings of multimodal llms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9568-9578, 2024. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.688, + 0.905, + 0.742 + ], + "angle": 0, + "content": "[71] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 
3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.744, + 0.905, + 0.798 + ], + "angle": 0, + "content": "[72] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.8, + 0.905, + 0.855 + ], + "angle": 0, + "content": "[73] Haochen Wang, Junsong Fan, Yuxi Wang, Kaiyou Song, Tiancai Wang, Xiangyu Zhang, and Zhaoxiang Zhang. Bootstrap masked visual modeling via hard patches mining. arXiv preprint arXiv:2312.13714, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.856, + 0.905, + 0.923 + ], + "angle": 0, + "content": "[74] Haochen Wang, Kaiyou Song, Junsong Fan, Yuxi Wang, Jin Xie, and Zhaoxiang Zhang. Hard patches mining for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10375-10385, 2023. 5" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.116, + 0.905, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.171 + ], + "angle": 0, + "content": "[75] Haochen Wang, Anlin Zheng, Yucheng Zhao, Tiancai Wang, Ge Zheng, Xiangyu Zhang, and Zhaoxiang Zhang. Reconstructive visual instruction tuning. In International Conference on Learning Representations, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.173, + 0.484, + 0.257 + ], + "angle": 0, + "content": "[76] Jiacong Wang, Bohong Wu, Haiyong Jiang, Zhou Xun, Xin Xiao, Haoyuan Guo, and Jun Xiao. World to code: Multimodal data generation via self-instructed compositional captioning and filtering. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4608-4623, 2024. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.258, + 0.484, + 0.342 + ], + "angle": 0, + "content": "[77] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 1, 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.343, + 0.484, + 0.384 + ], + "angle": 0, + "content": "[78] Wenxuan Wang, Quan Sun, Fan Zhang, Yepeng Tang, Jing Liu, and Xinlong Wang. Diffusion feedback helps clip see better. arXiv preprint arXiv:2407.20171, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.385, + 0.484, + 0.453 + ], + "angle": 0, + "content": "[79] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.455, + 0.484, + 0.512 + ], + "angle": 0, + "content": "[80] Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. SimVLM: Simple visual language model pretraining with weak supervision. In International Conference on Learning Representations, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.513, + 0.484, + 0.54 + ], + "angle": 0, + "content": "[81] X.ai. Grok-1.5 vision preview. https://x.ai/blog/grok-1.5v, 2024.5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.542, + 0.484, + 0.598 + ], + "angle": 0, + "content": "[82] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 
5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.599, + 0.484, + 0.654 + ], + "angle": 0, + "content": "[83] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.655, + 0.484, + 0.724 + ], + "angle": 0, + "content": "[84] Qiying Yu, Quan Sun, Xiaosong Zhang, Yufeng Cui, Fan Zhang, Yue Cao, Xinlong Wang, and Jingjing Liu. Capsfusion: Rethinking image-text data at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14022-14032, 2024. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.726, + 0.484, + 0.782 + ], + "angle": 0, + "content": "[85] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.783, + 0.484, + 0.839 + ], + "angle": 0, + "content": "[86] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? arXiv preprint arXiv:2210.01936, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.84, + 0.484, + 0.895 + ], + "angle": 0, + "content": "[87] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 
3, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.896, + 0.484, + 0.925 + ], + "angle": 0, + "content": "[88] Tao Zhang, Xiangtai Li, Zilong Huang, Yanwei Li, Weixian Lei, Xueqing Deng, Shihao Chen, Shunping Ji, and Jiashi" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.116, + 0.905, + 0.144 + ], + "angle": 0, + "content": "Feng. Pixel-sail: Single transformer for pixel-grounded understanding. arXiv, 2025. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.145, + 0.905, + 0.227 + ], + "angle": 0, + "content": "[89] Youcai Zhang, Xinyu Huang, Jinyu Ma, Zhaoyang Li, Zhaochuan Luo, Yanchun Xie, Yuzhuo Qin, Tong Luo, Yaqian Li, Shilong Liu, et al. Recognize anything: A strong image tagging model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1724-1732, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.229, + 0.905, + 0.298 + ], + "angle": 0, + "content": "[90] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.3, + 0.905, + 0.356 + ], + "angle": 0, + "content": "[91] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 
1, 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.116, + 0.905, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_origin.pdf b/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3a0018705494e367e6e472311aac115b320d917e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deb06487d17d2cd4b28d0f1f9a041b61a7cb5aac47514ef2bf5ea6d0a50a6caa +size 2073141 diff --git a/data/2025/2504_10xxx/2504.10462/full.md b/data/2025/2504_10xxx/2504.10462/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8ff2d6a0b906c2a0eb40b4a31fb1534f57f63c56 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/full.md @@ -0,0 +1,485 @@ +# The Scalability of Simplicity: Empirical Analysis of Vision-Language Learning with a Single Transformer + +Weixian Lei* Jiacong Wang* Haochen Wang* +Xiangtai Li Jun Hao Liew Jiashi Feng Zilong Huang† +*Equal contribution, † Project Lead +Bytedance Seed + +# Abstract + +This paper introduces SAIL, a single transformer unified multimodal large language model (MLLM) that integrates raw pixel encoding and language decoding within a singular architecture. Unlike existing modular MLLMs, which rely on a pre-trained vision transformer (ViT), SAIL eliminates the need for a separate vision encoder, presenting a more minimalist architecture design. Instead of introducing novel architectural components, SAIL adapts mix-attention mechanisms and multimodal positional encodings to better align with the distinct characteristics of visual and textual modalities. 
We systematically compare SAIL's properties—including scalability, cross-modal information flow patterns, and visual representation capabilities—with those of modular MLLMs. By scaling both training data and model size, SAIL achieves performance comparable to modular MLLMs. Notably, the removal of pretrained ViT components enhances SAIL's scalability and results in significantly different cross-modal information flow patterns. Moreover, SAIL demonstrates strong visual representation capabilities, achieving results on par with ViT-22B in vision tasks such as semantic segmentation. Code and models are available1. + +# 1. Introduction + +The pursuit of multimodal intelligence has driven the development of Multimodal Large Language Models (MLLMs) [49, 61, 68], which typically adopt a modular design: a pre-trained vision encoder (e.g., CLIPViT [15, 62]) extracts image features, a Large Language Model (LLM) [2, 16, 38, 58-60] processes text, and a lightweight projector aligns the two modalities. This framework achieves strong performance through multi-stage pretraining, supervised fine-tuning (SFT), and post-training on multimodal datasets [3, 18, 49, 76, 77, 91]. While effective, + +![](images/87aacb4266bb5df777e2e46d47cbe6631fbba6a4cecc29791d9ff98e59ada6b7.jpg) +(A) +Figure 1. (A) Data scaling curve for Modular Multimodal Large Language Model (MLLM) and SAIL, our Single Transformer-based MLLM. As pretraining data increases, the single transformer SAIL shows a sharper performance gain, demonstrating its superior data scalability. (B) Comparison to existing Single Transformer-based MLLMs: our SAIL pushes the performance boundaries on both vision tasks and vision-language tasks. + +![](images/74ee2a817ef3268b95e72eeb8f2fb43f406463207bb7cde5d1a436e7ac6a0bd7.jpg) +(B) + +this modular MLLM paradigm inherently fragments multimodal processing, reinforces reliance on pretrained visual encoders, which may limit deployment flexibility and scalability [11, 21, 54]. 
+ +A promising alternative is to eliminate the visual encoder entirely and process raw image patches and text tokens within a single Transformer. This unified architecture removes modality-specific modules, enabling parameter sharing and end-to-end learning of vision-language interactions. Previous works [11, 21, 54] have primarily explored the architecture design, training data, and methods of Single Transformer-based MLLMs. However, little exploration has been given to their fundamental properties, such as scalability, cross-modal information flow patterns, and visual representation capabilities. A deeper understanding of these properties is crucial for unlocking the full potential of Single Transformer-based MLLMs. + +In this work, we present an experimental analysis of the fundamental properties of Single Transformer-based MLLMs and compare them to modular MLLMs (e.g. + +LLaVA [49]). Additionally, in the absence of a pre-trained visual encoder, Single Transformers have to learn visual representations from scratch. Thus, an intriguing question arises: can a trained Single Transformer emerge as a strong vision encoder? + +We conduct a series of experiments to train and study our Single trAnformer model for vIsion and Language (SAIL). While we do not propose novel architecture designs, we introduce necessary modifications to enable the model to process different modalities in a unified architecture. In its micro architecture design, we address the different spatial characteristics of 2D images and 1D text data by employing a mixed attention mechanism: bidirectional attention for image patches and causal attention for text tokens, combined with multimodal rotary position embedding. Through model and data scaling, SAIL achieves performance on vision-language benchmarks comparable to modular MLLMs, while also functioning as a high-performing vision backbone, as shown in Figure 1. 
+ +More concretely, our empirical analysis uncovers following striking advantages of Single Transformer architectures: (i) Superior Data Scaling: In controlled experiments, SAIL exhibits steeper performance gains as pretraining data scales. While LLaVA-style modular MLLMs initially perform well, our model's performance becomes very close to theirs when pretrained on 512M samples, as shown in Figure 1(A). This suggests that unified architectures can effectively leverage large-scale data and potentially match the performance of modular MLLMs. + +(ii) Vision Centric Information Flow Pattern: Through analysis of attention distributions, we observe that Single Transformers assign significantly higher attention scores to image tokens during token prediction compared to modular MLLMs. This indicates that the information flow in Single Transformer MLLMs is more direct, with visual tokens influencing prediction tokens more prominently, highlighting a vision-centric approach to decision-making. + +(iii) Vision Encoder Functioning: Our experiments further demonstrate that the pretrained Single Transformer inherently serves as a powerful vision encoder. Comprehensive evaluations on vision-centric tasks, such as image classification and semantic segmentation, show that the model learns rich visual representations during multimodal pretraining. These representations enhance its capacity for both semantic-level comprehension (e.g., object categorization) and pixel-level understanding (e.g., fine-grained segmentation masks), bridging high-level abstraction and low-level visual reasoning within a unified architecture. + +In summary, our findings indicate that Single Transformer-based MLLMs hold great promise in surpassing modular MLLMs in terms of leveraging large-scale data, forming direct vision-centric information pathways, and functioning as effective vision encoders. 
We hope + +our empirical findings inspire further research to refine and enhance Single Transformer architecture, ultimately driving advancements in multimodal intelligence from a new perspective. + +# 2. Related Work + +# 2.1. Paradigms in Vision-Language Model Design + +Modular MLLMs with Visual Encoders. The prevailing approach in MLLM design employs modular architectures [3, 49, 61, 68, 77] that rely on pretrained vision encoders (e.g., CLIP-ViT [15, 62], InternViT [14]) to process visual inputs. The visual features extracted from these frozen encoders are then aligned with LLM input spaces via linear [7, 49, 75, 91] or cross-attention layers [1, 33]. While this module design enables effective transfer of pretrained visual-language knowledge, it also introduces several limitations. First, incorporating a separate ViT encoder significantly slows down both training and inference, increasing deployment complexity and requiring costly infrastructure—especially when compared to a single transformer unified model. Second, common strategies for integrating visual features, such as direct mapping into LLM inputs [7, 49, 91] or sharing them across LLM layers [1, 33], often struggle to reconcile the inherent differences between images and text representations. Finally, as model scale, balancing the interactions between the the encoder, LLM, and alignment layers becomes increasingly challenging [11, 54]. Thus, in this work, we explore a single transformer-based MLLM architecture that eliminates the ViT encoder and alignment components to overcome these challenges. + +Single Transformer-based MLLMs Without Visual Encoders. Emerging research explores end-to-end architectures that process raw image patches and text tokens through a single Transformer, bypassing visual encoders entirely. These monolithic designs fall into two categories: continuous tokenization and discrete tokenization. 
Continuous tokenization, exemplified by Fuyu-8B [5] and SOLO [11], directly maps patches to LLM embeddings via linear projections, enabling flexible resolution handling but requiring massive pretraining data. Discrete tokenization, adopted by Chameleon [67] and Emu3 [79], employs VQ-VAE tokenizers to compress images into discrete tokens, trading pixel-level fidelity for generation capabilities. While later efforts such as EVE [21] and MonoInternVL [54] demonstrate the feasibility of encoder-free training, critical gaps remain: (1) Existing methods rely on extra designs and auxiliary loss [21], complicating training pipelines; (2) The scaling laws and fundamental properties of purely end-to-end trained models remain poorly understood; (3) Vision-language interaction in shared parameter spaces lacks systematic analysis—most prior MLLMs + +default to causal attention for processing image-text sequences. In this work, we reveal that enabling bidirectional attention between image patches significantly enhances visual representation learning, addressing a key limitation in current designs. More importantly, our study bridges these gaps by establishing foundational principles for training scalable, self-contained single-transformer MLLMs. + +# 2.2. Vision Representation Learning + +Learning effective vision representations is a core challenge in computer vision research, with extensive works [4, 19, 20, 26, 31, 80] dedicated to this problem. With the proliferation of large-scale web-sourced image-text datasets [6, 27, 63], recent methods leverage this data to train deep visual representations via three primary paradigms: + +Text as Classification Labels. Early methods used textual descriptions as weak supervision by extracting categorical labels from captions. For example, frameworks like Tag2Text [34] and RAM [89] used ViTs [23] to predict noun-based pseudo-labels from datasets like CC12M [6]. 
CatLIP [57] scaled labels to millions using object-centric supervision, and SuperClass [35] directly used tokenized text tokens as classification categories. + +Image-Text Contrastive Learning Contrastive pretraining, as exemplified by CLIP [15, 62] and ALIGN [37], aligns global image-text embeddings within a shared latent space. Subsequent works [8, 14, 42, 45, 78, 87] focused on enhancing CLIP's performance and improving training efficiency. + +Text as Autoregressive Targets. Caption generation as a pretext task is another approach for visual representation learning. SimVLM [80] trains encoder-decoder architectures to autoregressively predict captions, while CapPa [71] trains vision encoders through sequence prediction. These methods often retain modular designs or auxiliary components like contrastive losses [83]. Our work aligns with this category but removes architectural fragmentation by jointly modeling image patches and text tokens in a single Transformer. We find that the pre-trained Single Transformer learns transferable vision representations, enabling it to handle downstream multimodal understanding tasks and function as a vision encoder without modifications. + +# 3. SAIL: Training a Single Transformer for Vision and Language + +# 3.1. Model Architecture + +SAIL is built upon a unified Transformer architecture (Figure 2(A)) that processes multimodal inputs through streamlined, modality-specific preprocessing. For text, raw input is tokenized using the language model's tokenizer and then transformed into embeddings via the textual embedding module. For images, we partition the input into fixed- + +size patches and project them into continuous embeddings via a linear projection. Additionally, we maintain a list of special tokens explicitly designed for visual modality encoding: and tokens mark the beginning and end of an image patch span, respectively. 
In multimodal scenarios, such as image-text pairs, these embeddings are concatenated into a single sequence and fed into the Transformer, enabling joint cross-modal interactions through unified self-attention layers. This design eliminates the need for modality-specific encoders, which efficiently processess heterogeneous data within a single transformer framework. + +Bidirectional attention within image patches. While existing multimodal large language models (MLLMs) [41, 48, 49, 91] predominantly adopt causal attention for autoregressive sequence modeling, our experiments reveal that enabling full bidirectional attention among tokens from the same image significantly enhances visual representation learning and boosts downstream vision-language task performance. Note that previous Single Transformer works [11, 21, 54, 79] have only utilized causal attention, without exploring the potential of mixed attention mechanisms. + +As illustrated in Figure 2(B), for SAIL we implement a mixed attention scheme: (1) For text tokens, we preserve causal attention to maintain autoregressive generation capabilities, allowing each token to attend only to its predecessors. (2) For image tokens, we activate full bidirectional attention within each image patch group, empowering every visual token to interact with all others in the same image. This design captures holistic spatial relationships and contextual dependencies among visual elements, addressing the under-explored potential of attention mechanisms in cross-modal alignment. The improved interaction paradigm not only refines vision-language feature fusion but also provides stronger inductive biases for complex reasoning tasks. + +Multimodal Rotary Position Embeddings. Following [77], we implement Multimodal RoPE (M-RoPE) in SAIL to harmonize positional modeling for multimodal inputs. The method decomposes positional encoding into two axes: height, and width. 
For text tokens, all axes share uniform position IDs (aligned with 1D-RoPE), whereas for images, height/width IDs adaptively map to token coordinates, as is shown in Fig 2(C). Notably, position indexing is sequentially initialized across modalities (e.g., starting from images before extending to subsequent text), preserving inter-modal consistency. M-RoPE not only improves positional sensitivity but also constrains absolute position magnitudes for visual tokens, facilitating robust generalization to extended sequences in inference. + +![](images/f93ce8b5d07f651bb28ff57f3b09ece10f95d308a8933e9753d6ce14a5c1df50.jpg) +Figure 2. Model architecture and micro-designs for SAIL. (A) Model Architecture: SAIL is a unified transformer that processes both images and texts without extra module designs. (B) Mixed Attention Mechanism: we adopt bidirectional attention for image patches from the same image and causal attention for text tokens. Examples for a multimodal sequence and a text sequence are provided. Colored squares represent "allow to attend" and white squares indicate "prevent from attending". (C) Multimodal RoPE: an illustration of the multimodal rotary position embedding for SAIL, with examples for a multimodal sequence and a text sequence. + +![](images/02d4e6e0e828472c97f4452200369a3fa7e8e4ab06ccd32a8e320982bf2f5130.jpg) + +# 3.2. Pretraining + +We apply a two-stage curriculum to progressively strengthen the visual perception of SAIL while preserving its inherent language capabilities. + +Stage 1: Accelerated Visual Knowledge Acquisition. In this stage, we pretrain SAIL on large-scale image-text pairs to rapidly bootstrap its visual understanding. To maximize data throughput, we uniformly resize all images to a lower resolution (e.g., $224 \times 224$ ), reducing multimodal sequence lengths and enabling the model to process more samples within fixed training time. 
To prevent catastrophic forgetting of linguistic knowledge, we interleave pure text corpora with multimodal data during training. This hybrid approach ensures efficient exposure to visual patterns while maintaining robust language proficiency. + +Stage 2: Enhancing Any-Resolution Image Understanding. Real-world applications require robustness to images of varying resolutions and aspect ratios, such as documents, charts, or infographics. Following prior works [11, 21], we extend pretraining with an any-resolution strategy: images retain their native resolutions during processing, and positional embeddings adapt dynamically to arbitrary spatial dimensions. This stage further refines SAIL's ability to model fine-grained visual details (e.g., tabular structures, text-rich graphics) while continuing to incorporate text-only data for language capability preservation. + +Pretraining Objective. Throughout both stages, we optimize the standard language modeling loss only on text to- + +kens. Image patches and special visual tokens are excluded from loss computation. + +# 3.3. Supervised Fine-tuning + +During the Supervised Fine-tuning (SFT) stage, we train SAIL on publicly available, multi-source instruction datasets to enhance its understanding of complex linguistic instructions and diverse dialogue patterns critical for real-world deployment. This phase fine-tunes the entire network architecture, focusing on aligning the model's responses with human intent through exposure to varied instructional formats and multimodal interactions. + +Table 1 shows the details of training datasets for pretraining and supervised fine-tuning (SFT) across all stages. During Stage 1 pretraining, we utilized mixed multimodal and pure text datasets including Recap-DataComp-1B [43] and SlimPajama [66], with images at a resolution of $224\mathrm{x}224$ totaling 512M image-text pairs. 
In Stage 2, the pretraining datasets include Capfusion [84], self-curated OCR data from LAION COCO [63], InfinityMM Stage 2 subset [29], and SlimPajama, utilizing the any resolution (AnyRes) strategy, with a combined total of 86M image-text pairs along with text data. The SFT stage employed the InfinityMM Stage 3 subset, processed at any resolution, containing 6M image-text pairs. + +
StageDatasetImg.ResNum
Pretraining S1Recap-DataComp-1B [43]224x224512M
SlimPajama [66]-
Pretraining S2Capfusion [84]AnyRes60M
OCR from LAION COCO [63]7M
InfinityMM Stage 2 subset [29]19M
SlimPajama [66]-
SFTInfinityMM Stage 3 [29]AnyRes6M
+ +Table 1. Details of training datasets used across all stages. "Img.Res" refers to the image resolution settings applied during each training stage. All datasets listed are publicly available. Note that these settings represent the default configuration for standard SAIL training, while separate settings are used for scaling experiments and ablation studies. + +# 4. Experiment + +# 4.1. Experimental Settings + +Evaluation Benchmarks. For evaluation of vision and language tasks, we evaluate SAIL and existing MLLMs on a broad range of multimodal benchmarks. Specifically, MLLM benchmarks encompass MMBench-EN [50], SEEDBench-IMG [40], MMVet [85], MME [46], HallusionBench [30], MathVistaMINI [53], and OCR-Bench [51]. Visual question answering benchmarks include TextVQA [65], ScienceQA-IMG [52], AI2D [39], MMStar [9], RealWorldQA [81]. + +For evaluation of vision representation learning, we conduct experiments on ImageNet-1K [20] for image classification, ADE20K for semantic segmentation [90], and ARO [86] for attribute, relation, and ordering. + +Implementation Details. For pretraining, we initialize SAIL from the Mistral-7B-v0.1 base LLM and set the patch size to 14. We modify Megatron [64] to support SAIL's multimodal input. Pretraining uses 128 NVIDIA A100 80G GPUs with 2-way tensor parallelism and 64-way data parallelism. The learning rate is set at 5e-5 and decays cosinely to a minimum of 5e-6. For training efficiency, we concatenate sequences from different data samples into one long sequence of 32,768 tokens, adjusting the attention mask to ensure that tokens from different samples do not attend to each other. We use a round-robin approach to interleave image-text packed sequences and pure text packed sequences, configuring the global batch to contain approximately 16K image-text pairs. + +For Supervised Fine-Tuning (SFT), the global batch size is set to 512. 
Training is performed for one epoch with a maximum learning rate of 1e-5, following a linear warm-up phase and then transitioning to a cosine decay schedule. + +For vision, we load the checkpoint after Stage 1 pretraining and keep it frozen for downstream evaluations, including (1) image classification on ImageNet-1K [20], (2) semantic segmentation on ADE20K [90], and (3) attribute, relation and, ordering on the ARO benchmark [86]. Specif + +ically, (1) for image classification, we utilize an attention-based classifier [25] with 90 epochs of linear probing, where detailed configurations are mostly obtained from common practices [32, 73, 74]. Images are resized to $224 \times 224$ and the global batch size is 8,192 across 8 A100 (80G) GPUs. (2) For semantic segmentation, we adopt ViT-Adapter [12] with UperNet [82] as the segmentation decoder. The implementation is based on MMSegmentation [17] with 80k training iterations. The input resolution is $512 \times 512$ and the global batch size is 16 across 8 A100 (80G) GPUs. (3) For attribute, relation and, ordering, we regard the negative of the caption loss over each image-text pair as the similarity metric for retrieval. + +# 4.2. Experimental Results + +# 4.2.1. Results on Vision Language Tasks + +As shown in Table 2, we compare SAIL against existing MLLMs across 13 vision-language benchmarks. SAIL consistently outperforms other Single Transformer-based models like Fuyu [5], EVE [21], SOLO [11], MonoInternVL [54], and EVE2 [22] across diverse vision-language tasks. This demonstrates that SAIL can achieve significant performance gains and push the boundaries of Single Transformer-based MLLMs without needing extra component designs or auxiliary training losses. Moreover, when compared to methods employing discrete vision tokens (e.g., Chameleon and Emu3), SAIL demonstrates superior performance. 
These results validate that scaling up single-transformer pretraining effectively enhances cross-modal alignment between images and text. Compared to the state-of-the-art modular MLLM LLaVA-OneVision [41], SAIL achieves comparable performance on some benchmarks, such as MMStar, SEEDBench-IMG, and RealWorldQA. While the performance of Single Transformer-based MLLMs still lags behind modular MLLMs in certain areas, we hypothesize that scaling the pretraining data volume or incorporating higher-quality instruction-tuning data will bridge the remaining performance gap. + +# 4.2.2. Results on Vision Representation Learning + +In this section, we compare the quality of learned visual representations of our SAIL with other Single Transformer-based alternatives, including EVE [21], EVE2 [22], and SOLO [11]. + +Classification and Segmentation. As demonstrated in Table 3, our method, SAIL, achieves a Top-1 accuracy of $84.95\%$ and a Top-5 accuracy of $97.59\%$ on the validation set of ImageNet-1K [20], significantly outperforming state-of-the-art alternatives [11, 21, 22]. In the segmentation task, SAIL also demonstrates superior performance with an mIoU of $55.30\%$ , an mAcc of $67.24\%$ , and an aAcc of $84.87\%$ , illustrated in Table 3. These results indicate that SAIL is effective in both classification and segmentation tasks, of + +
Method#Param#Data#VtokenGeneral VQAHallucinationMath&knowledgeOCR VQA
MMS*MMBenSEEDIMMVMMERWQAPOPEHalluSQA1MathVTQAAI2DOCRB
Modular MLLMs:
InternVL-1.5 [13]2.2B-/-332846.770.969.839.3190257.988.337.384.941.370.569.8654
Qwen-VL-Chat [3]7B7.2B / 50M25634.560.658.2-184849.3-36.868.235.361.545.9488
LLaVA-1.5 [47]7B0.4B+ / 665K57633.164.364.330.5185954.885.927.666.825.546.154.8318
LLaVA-1.6 [48]7B0.4B+ / 760K288037.667.464.743.9184257.886.427.670.232.564.966.6532
Cambrian-1 [69]8B10B+ / 7M57650.775.974.7--64.2-30.680.448.171.773.0-
LLaVA-OneVision [41]7B10B+ / 3.2M729060.981.774.858.8199865.5--96.656.1-81.6-
Single Transformer-based MLLMs:
Fuyu [5]8B-/--34.410.759.321.4-43.78429.856.830.2-46.8366
Chameleon [67]7B1.4B+ / 1.8M102431.131.130.68.31703919.417.147.222.54.846.07.0
EVE [21]7B33M / 1.8M2304-52.364.625.71628-85.0-64.9-56.861.0398
SOLO [11]8B43.7M / 2M102435.867.764.430.4126044.778.640.473.332.925.061.4126
Mono-InternVL [54]3B1.3B / 7M6400-65.567.440.11875--45.793.645.772.668.6767
Emu3 [79]8B-/-16K46.658.568.237.2-57.485.231.789.231.364.770.0687
EVE2 [22]7B92M / 7.3M2500-66.371.445.0170962.487.6-96.2-71.174.8702
SAIL7B600M / 6M360053.170.172.946.3171963.985.854.293.357.077.176.7783
+ +Table 2. Comparison with existing vision-language models on various vision-language benchmarks, including MMS*: MMStar [9]; MMBen:MMBench-EN [50]; SEED:SEEDBench-Img [40]; MMV:MMVet [85]; MME [46]; POPE [44]; Hallu: HallusionBench [30]; SQA: ScienceQA-Img [52]; TVQA: TextVQA [65]; MathV: MathVistaMINI [53]; AI2D [39]; RWQA: RealWorldQA [81]; OCRB:OCR-Bench [51]. Note that #A-Param denotes the number of activated parameters; #Data represents the pre-training / fine-tuning data volume; #Vtoken indicates the maximum image patch tokens. For MME, we report the sum of perception and cognition scores. The top two results are highlighted in bold and underline, respectively. All results are derived from those reported in other papers and the official reproduction results from the OpenCompass leaderboard [24]. Our results are obtained by VLMEvalKit [24]. + +
MethodClassificationSegmentation
Top-1Top-5mIoUmAccaAcc
EVE [21]42.0365.7727.1235.8972.91
EVE2 [22]44.8669.4140.8553.5379.31
SOLO [11]59.1080.8935.1144.8176.02
SAIL84.9597.5955.3067.2484.87
+ +Table 3. Comparison on image classification and semantic segmentation with other encoder-free approaches. Our SAIL outperforms other alternatives by a large margin. + +
Method#Data#ParamImageNet-1KADE20K
OpenCLIP-H [15]2B0.6B84.4-
OpenCLIP-G [15]2B1.8B86.239.3†
ViT-22B [19]3B22B89.555.3
InternViT [14]6B6B88.258.7
SAIL0.5B7B85.055.3
+ +fering substantial improvements over existing methods. In Table 4, even when comparing with other state-of-the-art vision backbones, our SAIL manages to achieve remarkable competitive performance with significantly less training data, demonstrating the scaling property of SAIL. + +Attribute, Relation, and Ordering. To systematically evaluate the ability of SAIL to understand different types of + +Table 4. Comparison on image classification and semantic segmentation with other vision backbones. $\dagger$ indicates training with head tuning using UperNet [82], while others are based on ViT-Adapter [12]. SAIL, with significantly less training data, achieves competitive performance. + +
MethodRelationAttributeOrder
COCOFlickr30K
OpenCLIP-H [15]49.964.632.640.4
OpenCLIP-G [15]49.965.633.038.3
CLIP-B/32 [62]59.262.948.157.9
CLIP-L/14 [62]61.261.746.856.8
InternViT [14]59.666.073.476.3
NegCLIP [86]81.071.086.091.0
CapPa [72]86.785.798.899.2
SAIL100.099.5100.0100.0
+ +Table 5. Comparison on attribute, relation, and ordering (ARO) with other vision backbones. SAIL almost encodes compositional relationships between objects and attributes perfectly. + +relationships, attributes, and order information, we conduct experiments on the ARO benchmark [86]. As demonstrated in Table 5, SAIL encodes compositional relationships between objects and attributes almost perfectly, significantly surpassing other state-of-the-art vision backbones. + +For additional vision-related tasks, please refer to PixelSAIL [88] for SAIL's downstream capabilities in pixel-grounded understanding. + +# 4.3. Properties of Single Transformer + +# 4.3.1. Scaling Properties. + +Model Scaling: We selected models of different sizes: SAIL-0.5B, SAIL-3B, and SAIL-7B (SAIL by default) for our experiments. Each model underwent Stage 1 pretraining on a mixed multimodal and pure text dataset, encountering 512M image-text pairs. Subsequently, they were fine + +![](images/e8f37e8f122c2c39ea9d4d546ac139f8ce9719c6c97ce52462f5ab50d470db13.jpg) +Figure 3. Model scaling of SAIL. Left: As the model size increases, the training language modeling loss gradually decreases. Right: As the model size increases, performance on downstream VLM tasks progressively improves. + +![](images/08e984e5b7eac62672319f52aa00b0a3fb3f0b7e9bc754dbd7bad2e6f2348257.jpg) + +tuned on the LLaVA-mix-665K dataset using the any resolution (anyres) strategy. We evaluated the models based on their performance on vision and language benchmarks after supervised fine-tuning. + +The normalized performance of SAIL against model size is plotted in Figure 3. As the model size scales up, we observe a corresponding enhancement in performance. Additionally, as shown on the left side of Figure 3, the training language modeling loss decreases with increasing model size. 
This reduction in training loss indicates that larger models have a greater capacity to learn multimodal alignments effectively, enabling them to capture complex relationships between vision and language more accurately. The improved learning capacity directly translates to better performance on downstream VLM tasks, showcasing the benefits of scaling up the Single Transformer architecture. + +Data Scaling: we compared SAIL with its modular MLLM counterpart. For the modular MLLM, we used SigLIPSO [87] as the vision encoder, and the language model shared the same architecture and initialization parameters as SAIL. Both models were pre-trained using Pretraining stage-1 setting, with SAIL encountering 32M, 128M, and 512M image-text pairs during training, followed by fine-tuning on the LLaVA-mix-665K dataset. All parameters of both models are trainable. Both models employ an identical number of input tokens for images and text. The normalized performance of both models is plotted in Figure 1(A). The results show that in the low-data regime (32M), SAIL's performance lags behind the modular MLLM, likely due to SigLIP's prior training on 40B samples. However, as the data scales, SAIL exhibits a steeper performance curve, indicating more promising data scaling properties. At 512M image-text pairs, SAIL achieves performance comparable to the modular MLLM in our evaluation subset. This demonstrates the single transformer's superior data scalability, even without a pretrained vision encoder. + +Quantitative results on evaluated benchmark tasks of model scaling and data scaling are tabulated in the appendix. + +![](images/23f836472eb6724001df347ec8381d670f86dae4957d99f0de1396cfaa1bc7f7.jpg) +Figure 4. Image Attention Score Allocation: The figure shows the proportion of image attention scores across different transformer layers for Single Transformer-based MLLM and modular MLLM when predicting tokens. 
Single Transformer-based MLLM generally allocates higher attention weights to image tokens compared to modular MLLM. + +# 4.3.2. Information Flow Pattern + +Different attention pattern compared to modular MLLM: since our comparative experiments show that the Single Transformer model exhibits more promising data scaling properties, we conducted an analysis of the trained SAIL model and its modular MLLM counterpart. Specifically, we followed the methodology from FastV [10] to analyze the attention score distribution for each predicted token given an image and a user query. This analysis focuses on how much attention is allocated to image tokens during token prediction. We selected 1000 samples from various datasets including VQAv2, GQA, TextVQA, DocVQA, MME, SEEDBench-IMG, MMBench, and some self-curated dialog examples. For each model prediction, we computed the average attention scores assigned to previous image tokens by the output token. + +We conducted a comparative experiment between Single Transformer-based MLLMs and modular MLLMs. The Single Transformer-based MLLMs included SAIL, SOLO [11], and EVE [21], while the modular MLLMs included Qwen2-VL [77], LLaVA-OneVision [41], and LLaVA1.5 [47]. + +The results are depicted in Figure 4. Single Transformer-based MLLMs allocate between $60\%$ and $80\%$ of attention scores to image tokens across all layers when predicting tokens. In contrast, modular MLLMs such as Qwen2-VL and LLaVA-OneVision allocate only $10\%$ to $30\%$ of attention scores to image tokens across different layers. For LLaVA1.5, which does not update the ViT parameters during supervised fine-tuning (SFT), the image attention score is relatively high in the first two transformer layers but declines sharply in subsequent layers. + +
Method | MMBench [50] | MME [46]
Physical Relation | Celebrity Relation | Position | Posters | Celebrity
Modular MLLM | 30.4 | 50.5 | 98.3 | 134.0 | 100.3
SAIL | 52.2 | 88.9 | 160.0 | 108.2 | 75.0
+ +From this experiment, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism. + +These findings indicate that the Single Transformer model places more emphasis on grounding its predictions in the visual information. As the model undergoes data scaling, it allocates more effective computation to image tokens, thereby enhancing its capability as a vision-centric model. + +In summary, the attention pattern analysis underscores the Single Transformer's ability to robustly integrate visual context, enabling it to scale efficiently and potentially outperform modular MLLMs in vision-language tasks. + +# 4.3.3. Task-Specific Performance Analysis + +We dissect SAIL's strengths and limitations through targeted case studies: + +Strengths: Spatial Reasoning. SAIL excels at tasks requiring precise spatial location. As shown in Table 6, under the setting of our data scaling experiment, it outperforms the modular counterpart by 61.7 points on the MME Position split and $21.8\%$ on MMBench Physical Relation questions. The unified architecture likely enables tighter coupling between visual geometry and linguistic descriptions. + +Weaknesses: World Knowledge. Conversely, SAIL falls short in tasks that demand extensive world knowledge. As shown in Table 6 SAIL underperforms in the MME celebrity and art splits compared to the modular MLLM. This underperformance can be attributed to SAIL's lack of diverse domain-specific data during pretraining, a gap that was not sufficiently addressed during supervised fine-tuning. Modular MLLMs, with their pretrained vision encoders like CLIP [15, 62] or SigLIP [87], have a broader knowledge base and therefore handle such tasks more effectively. 
We hypothesize that scaling up SAIL's pretraining data diversity could help bridge this gap, enhancing its performance on knowledge-intensive tasks. + +Table 6. Performance Comparison of SAIL and Modular MLLM in MMBench and MME Tasks: the strengths of SAIL in spatial reasoning tasks (MMBench Physical Relation and MME Position split) and its weaknesses in world knowledge tasks (MMBench Celebrity Relation and MME Celebrity and Posters splits). + +
Exp. Setting | VQAv2 | GQA | SQA | TQA | SEED-I
Default | 59.1 | 46.9 | 59.6 | 20.1 | 35.1
#1 No Img full attn | 57.8 | 45.2 | 58.7 | 16.2 | 33.8
#2 No pure text in PT | 56.3 | 42.1 | 48.6 | 18.3 | 32.4
+ +Table 7. Ablation Study on Basic Factors for SAIL: This table presents the impact of different ablation settings on the performance of SAIL across VQAv2 [28], GQA [36], SQA [52], TQA [65], and SEED-I [40]. The default setting includes image full attention and the inclusion of pure text data in pretraining. Ablation #1 removes image full attention, and ablation #2 excludes pure text in pretraining. + +# 4.4. Empirical Observations on Basic Factors + +To guide scalable training of single-transformer MLLMs, we conduct ablation studies on two critical design choices using SAIL-0.5B pretrained on 128M image-text pairs and fine-tuned on LLaVA-mix-665K. Performance is evaluated through zero-shot image classification after pretraining [71] and vision-language benchmarks after SFT. + +Bidirectional Attention for Image Patches with Multimodal Position Encoding. We compare two approaches for integrating image patches into the transformer: (1) Causal attention with 1D positional encoding, using a token to demarcate image rows. (2) Full bidirectional attention for image patches paired with multimodal rotary position embeddings (RoPE), which jointly encode spatial coordinates (e.g., 2D grid positions) and text token positions. As shown in Table 7, configuration of using bidirectional attention with multimodal RoPE significantly improves performance on vision-language tasks, with a particularly notable gain of $3.1\%$ on TextVQA. This suggests that enabling cross-patch interactions during pretraining enhances visual representation learning and tightens cross-modal alignment. + +Interleaving Pure Text Data During Pretraining. We analyze the impact of mixing SlimPajama text data with image-text pairs during pretraining. The results, as presented in Table 7 #2, reveal that mixing in pure text data consistently improves performance across vision and language benchmarks. 
This finding underscores the importance of preserving language capabilities in the LLM when training Single Transformer models, as maintaining strong language skills is crucial for building a multimodal model capable of complex reasoning. Currently, incorporating text data in training is one of the effective methods to maintain the language abilities of the model. + +In conclusion, our ablation studies identify key design choices for training SAIL effectively. Using bi-directional attention with multimodal rotary position embeddings enhances visual perception, while incorporating pure text data preserves essential language capabilities for robust multimodal performance. + +# 5. Conclusion + +In this work, we conducted an extensive analysis of Single Transformer-based MLLMs compared to modular MLLMs. Our investigation explored the unique properties of Single Transformers, including scalability, cross-modal information flow patterns, and visual representation capabilities. A series of experiments on our trained SAIL model demonstrated that this unified architecture achieves performance on vision-language benchmarks comparable to modular MLLMs while also functioning effectively as a vision backbone. Our findings highlight several advantages of Single Transformer architectures, such as superior data scalability, vision-centric information flow, and inherent capabilities as a powerful vision encoder. We hope our empirical findings will inspire further research to refine and enhance Single Transformer architectures, advancing the field of multimodal intelligence. + +# Appendix + +In the appendix, we provide additional experimental details and results. + +# Additional Experimental Details + +Training Configurations. In this section, we provide the corresponding setups for our experiment series in the main paper, including the default setting, the data scaling series, the model scaling series, and ablation experiment settings. 
The detailed configurations are shown in Table 8. + +Evaluation Configurations. In the main paper, we measure the model performance on several benchmarks: VQAv2 [28], GQA [36], ScienceQA-IMG [52], TextVQA [65], POPE [44], MME [46], MMBench [50], and SEEDBench-IMG [40]. We normalized the performance to a full score of 100 and averaged the performance across these benchmarks to plot the curves shown in Figure 1(A) and Figure 3. The detailed experimental results are shown in Table 9. + +# Additional Experimental Results + +A comparison of SAIL and LLaVA1.5. In this section, we conduct an experiment to compare SAIL with LLaVA1.5 [47]. In this experiment, our SAIL is trained on 512M image-text pairs in Pretraining Stage 1, followed by fine-tuning on the LLaVA-mix-665K dataset. To fairly compare the performance of the two models, we do not use the anyres strategy during SFT. Instead, we adopt the same image processing approach as LLaVA1.5, ensuring that the aspect ratio and number of image tokens are consistent across both models. + +The experimental results are presented in Table 10. Despite our model being trained on only 512M image-text pairs, which is significantly smaller than the CLIP pretrain- + +ing data used in the LLaVA1.5 model, the results show that our model achieves comparable performance to LLaVA1.5 across various benchmarks. Remarkably, our model even outperforms LLaVA1.5 on specific benchmarks such as DocVQA and ChartVQA. + +These findings highlight the strong potential of Single Transformer models in terms of data scaling. Specifically, they suggest that even with a relatively smaller pretraining dataset, Single Transformer models can perform on par with, or even exceed, more extensively trained modular MLLMs like LLaVA1.5 when similar preprocessing and controlled variables are applied. + +Compare SAIL and LLaVA on MMVP. We compare SAIL and LLaVA1.5 [47] on MMVP [70] to dissect the behavior of the two models. The results are shown in Figure 5. 
From examples (A) and (B), we observe that SAIL performs better in perceiving minor regions and objects. Examples (C) and (D) illustrate that SAIL can more accurately distinguish the states of objects. + +Additional Experiments on Information Flow Pattern Analysis. In the main paper, we analyzed the distribution patterns of image attention scores for different Single Transformer-based MLLMs and modular MLLMs. The results showed that Single Transformer-based MLLMs allocate more attention weights to image tokens. However, this could be due to different models processing varying numbers of image tokens, where more image tokens lead to higher aggregated attention scores. + +To analyze this in a more controlled manner, we designed an additional experiment. Using the data scaling setup at 512M, we pretrained SAIL and its modular MLLM counterpart. After pretraining, we fine-tuned both models using the LLaVA-mix-665K dataset, fixing the resolution size to 224x224 during SFT, instead of using any resolution. + +The results, shown in Figure 6, reveal that SAIL allocates higher attention scores to image tokens across all transformer layers compared to the modular MLLM, particularly in medium layers $(+43.5\%)$ in layer 14) and deep layers $(+41.2\%)$ in layer 31). + +From this, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism. + +Attention Map Visualization. In the main paper, we found that Single Transformer-based MLLMs allocate a large portion of attention weights to image tokens during inference, indicating a more vision-centric model. Here, we visualize the attention distribution of SAIL across different regions of the image when predicting tokens. 
+ +The results in Figure 7 illustrate the attention maps for + +
Exp | Model | LLM | Stage 1 | Stage 2 | SFT
| | | Data | LR | Data | LR | Data | LR
Figure 1(A) | SAIL, point 32M | Mistral-7B-v0.1 | Standard Stage 1 Data (32M image-text pairs) | (5e-5, 5e-6) | - | - | LLaVA-mix-665K | (1e-5, 0)
Figure 1(A) | SAIL, point 128M | Mistral-7B-v0.1 | Standard Stage 1 Data (128M image-text pairs) | (5e-5, 5e-6) | - | - | LLaVA-mix-665K | (1e-5, 0)
Figure 1(A), Table 6 | SAIL, point 512M | Mistral-7B-v0.1 | Standard Stage 1 Data (512M image-text pairs) | (5e-5, 5e-6) | - | - | LLaVA-mix-665K | (1e-5, 0)
Figure 1(B), Table 2 | SAIL | Mistral-7B-v0.1 | Standard Stage 1 Data (512M image-text pairs) | (5e-5, 5e-6) | Standard Stage 2 Data | (1e-5, 5e-6) | Standard SFT Data | (1e-5, 0)
Table 3, 4, 5 | SAIL | Mistral-7B-v0.1 | Standard Stage 1 Data (512M image-text pairs) | (5e-5, 5e-6) | - | - | - | -
Figure 3, Table 7 | SAIL-0.5B | Qwen2.5-0.5B | Standard Stage 1 Data (128M image-text pairs) | (5e-4, 5e-6) | - | - | LLaVA-mix-665K | (1e-5, 0)
Figure 3 | SAIL-3B | Qwen2.5-3B | Standard Stage 1 Data (128M image-text pairs) | (1e-4, 5e-6) | - | - | LLaVA-mix-665K | (1e-5, 0)
Figure 3 | SAIL-7B | Mistral-7B-v0.1 | Standard Stage 1 Data (128M image-text pairs) | (5e-5, 5e-6) | - | - | LLaVA-mix-665K | (1e-5, 0)
+ +Table 8. Experimental Configurations for Various Settings. The table lists the models used, the specific LLM variants, the datasets, and learning rates (LR) applied during each training stage (Pretraining Stage 1, Pretraining Stage 2, and SFT). "Standard Stage 1 Data", "Standard Stage 2 Data" and "Standard SFT Data" are listed in Table 1. Specific points and tables/figures referred to in the text are also indicated. + +
Model | VQAv2 | GQA | SciQA-IMG | TextVQA | POPE | MME | MMBench | SEEDBench-IMG | Norm (avg)
Figure 1, modular MLLM, 32M | 76.96 | 58.7 | 68.48 | 58.68 | 88.17 | 1599 | 69.44 | 70.31 | 61.41
Figure 1, modular MLLM, 128M | 78.47 | 59.78 | 70.05 | 59.82 | 86.78 | 1638 | 68.57 | 68.11 | 61.52
Figure 1, modular MLLM, 512M | 80.06 | 62.38 | 70.34 | 57.85 | 83.14 | 1379 | 70.82 | 69.83 | 61.86
Figure 1, SAIL, 32M | 70.51 | 57.95 | 63.32 | 31.67 | 81.77 | 1421 | 48.22 | 61.51 | 51.93
Figure 1, SAIL, 128M | 76.36 | 60.93 | 62.61 | 56.86 | 85.5 | 1458 | 53.94 | 66.60 | 57.91
Figure 1, SAIL, 512M | 78.51 | 62.06 | 67.48 | 63.94 | 86.04 | 1530 | 56.71 | 68.83 | 60.51
Figure 3, SAIL-3B | 67.3 | 53.2 | 63.8 | 30.9 | 66.9 | 820.8 | 44.6 | 55.4 | 47.80
Figure 3, SAIL-0.5B | 59.1 | 46.9 | 59.6 | 20.1 | 59.8 | 761.45 | 38.5 | 35.1 | 39.92
+ +Table 9. Detailed experimental results in the main paper. + +
Method | Pretrain | SFT | VQAv2 | GQA | SciQA-IMG | TextVQA | POPE | MMBench | SEEDbench | DocVQA | ChartQA | AI2D | MMStar | avg
LLaVA-1.5-336px [47] | 12.8B+558K | 665K | 78.5 | 62.0 | 66.8 | 58.2 | 85.9 | 64.3 | 66.1 | 28.1 | 18.2 | 54.8 | 32.4 | 58.3
SAIL | 512M | 665K | 77.8 | 61.6 | 68.0 | 56.4 | 86.6 | 61.3 | 69.8 | 29.3 | 21.5 | 58.7 | 37.1 | 59.1
+ +Table 10. Comparison of SAIL and LLaVA1.5. We evaluate the models on VQAv2 [28], GQA [36], ScienceQA [52], TextVQA [65], POPE [44], MMBench [50], SEEDBench [40], DocVQA [56], ChartQA [55], AI2D [39] and MMStar [9]. + +specific tokens to the image portion when SAIL generates predictions for multimodal queries. The visualizations show that in the early transformer layers, the predicted tokens primarily focus on the salient regions of the image. As the model progresses to deeper layers, the attention shifts to areas more relevant to the predicted tokens. This behavior demonstrates that SAIL has the potential to function as a grounding model, effectively correlating text tokens with their corresponding image regions. + +In other words, during inference, the model incrementally concentrates attention weights on relevant regions, aiding in decision-making. This progressive focusing of attention signifies the model's capability to ground text tokens in the corresponding visual context, enhancing its performance in vision-language tasks. + +Visual Understanding Demonstration. We investigate several vision perception and reasoning capabilities of our SAIL. These include its ability to understand rich OCR information (Table 11), interpret real-world scenes (Table 12), comprehend scientific charts (Table 13), and analyze poster contents (Table 14). + +![](images/eb6a28a28a20b99c926f16b7385ebc1e417e5d659893ab587113a8b6f7597b5d.jpg) +(A) Are there patterns on the easter eggs? + +![](images/7637efd9ee2d43b4d77ee2d1c219080912e7103759b698c9370945ebd8bfd6db.jpg) +GT: Yes; No + +![](images/a1c62c760920c7d865b90215f1f47dbfe4133eedf1aaf8f9b80698512acfbfef.jpg) +SAIL: Yes; No +(C) Are the birds flapping upward or downward? +GT: Upward; Downward +SAIL: Upward; Downward +LLaVA1.5: Upward; Upward +Figure 5. Comparison of SAIL and LLaVA1.5 on MMVP examples. 
SAIL demonstrates better performance in perceiving minor regions and objects, as well as more accurately distinguishing object states. + +![](images/d6cdb7ea2a12e15ebee32a5cc29d4041d12e9a7a69f66eeb92347e9b7ed99910.jpg) +LLaVA1.5: Yes; Yes + +![](images/9064d8a14fd89c407062d6f65d9550a3b3b10c965b254fe31126fd6726721b04.jpg) +(B) Are there any words displayed on the vehicle's lightbar? +GT: Yes; No + +![](images/c743f0fe8f49bafa26967c3602456ccc131b4c41de5f8eefbc16ca5bcc4d4e12.jpg) + +![](images/5cf82c40e2027f2eec837df6873535efaf1d21cde37f0251e7f842f4a80e5417.jpg) +SAIL: Yes; No +LLaVA1.5: Yes; Yes +(D) Is the elephant's trunk raised or lowered? +GT: Raised; Lowered +SAIL: Raised; Lowered +LLaVA1.5: Lowered; Lowered + +![](images/d74c79b2594ba8ce53efc20e4751a99dfed385f78b166c083f55da888145dbd3.jpg) + +![](images/86ea22e9302f393c6faa266a08d690c1d93155388ab664b38df9f56bb5bac5f9.jpg) +Figure 6. Image attention score allocation for SAIL and its modular MLLM counterpart. We compared the attention score allocation distribution for shallow layers, medium layers, and deep layers between these two models. The Single Transformer-based MLLM model significantly allocates a higher proportion of attention score to image tokens during prediction than the modular MLLM. + +# Query: When was the travel agency founded? + +![](images/72403d5df5d4cd1878d5e33c5bd4d9fa9dd848225e493287b525c8fda5a85e1d.jpg) +Output token: "2", Layer 2 + +![](images/ca7b4d2d7629b3566f0d32cb2b1bd1e264daec4f37ae471c87d0ab3de85774dd.jpg) +Output token: “2”, Layer 25 + +# Query: What color is the Bicycle? + +![](images/14245205aa410e48bd6a91506bad91fa760f5c60d89237bfedd1e75ea248e219.jpg) +Output token: "red", Layer 0 + +![](images/45ceef6727c7a3c12e1dcef96d22949a055822dcf3dcdc91bdcc1b96cfdab9b9.jpg) +Output token: "red", Layer 31 +Figure 7. Visualization of SAIL's attention distribution across image regions during token prediction. 
In early transformer layers, attention primarily focuses on the salient regions of the image. As the model progresses to deeper layers, attention shifts to areas more relevant to the predicted tokens. + +# Example 1: Understanding OCR Information in Webpage. + +![](images/20f7c01d41fb79fbaccfe67a2b0667fe76827939bc0931c034efc63a00dd6df2.jpg) + +# User Please output all the text information. + +SAIL Cornell University arXiv cs arXiv:2304.08485 Computer Science Computer Vision and Pattern Recognition Visual Instruction Tuning HaoTian Liu, Chunyuan Li, Qingyang Wu, Yong Jae Lee Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLM for general-purpose visual and language visual and language this http URL URL Early Experiments: LLMs demonstrate impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLAVA and GPT-4 achieves a new state-of-the-art accuracy of $92.53\%$ . We make GPT-4 generated visual instruction tuning data, our model and code base publicly available. Comments: NeurIPS 2023 Oral; project page: this https URL Subjects: Computer Vision and Pattern Recognition (cs.CV); Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Machine Learning (cs.LG) Cite as: arXiv:2304.08485 [cs.CV] (or arXiv:2304.08485v2 [cs.CV] for this version) https://doi.org/10.48550/arXiv.2304.08485 Submission history From: Haotian Liu [view email] [v1] Mon, 17 Apr 2023 17:59:25 UTC (4,360 KB) [v2] Mon, 11 Dec 2023 17:46:14 UTC (4,985 KB) + +Table 11. 
SAIL demonstrates a strong capability to extract information in OCR-rich scenarios. + +# Example 2: Understanding Real-World Scene. + +![](images/cbf681ad40eed4080496a59ec1cab9482e6085a739b47fb697bc4e5fa99eabde.jpg) +User Please describe the image in detail. + +# SAIL There are several notable elements in the image: + +1. The highway is multi-lane, with multiple lanes visible. 2. The traffic is dense, with vehicles in various lanes. 3. The weather seems to be overcast, as indicated by the grayish tones of the image. 4. There are several traffic signs and signs visible, including a green highway sign indicating "1/2 mile" to the right, and a green sign with the number "16" and the word "MILE" on the left side of the image. 5. The background includes a hill or mountain, with trees and greenery visible. 6. There are also several power lines and electrical pylons visible along the sides of the highway. The image's focus is on the traffic and the surrounding environment. The vehicles are primarily in the center and right lanes of the highway, with some vehicles in the left lanes as well. The highway is flanked by lush greenery, including trees and grassy areas, suggesting that this is a rural or possibly a suburban area. The traffic appears to be moving at a moderate pace. The weather conditions suggest that this is a daytime scene, possibly during the morning or late afternoon. The image is taken from a perspective that suggests the viewer is in a vehicle on the highway, observing the traffic ahead. In summary, the image captures a typical highway scene with moderate traffic, surrounded by a mix of natural and built environments, under an overcast sky. The road is in good condition, and the vehicles are traveling smoothly in both directions. + +Table 12. SAIL demonstrates a strong capability about understanding the real-world scene. + +# Example 3: Understanding the Scientific Charts. 
+ +![](images/d4d1f138be91e3ae0a2bdd84b923df543b8646ec19b28b01a2a51525de73dedb.jpg) + +![](images/61e34a4823cfcf7abfbd05b2e685db45e3739a868869559e15d3920fe25b1d5f.jpg) + +![](images/6e114755f6e4fbd24b51e09e065d2fd1a5dd224fab94ab7cfcd565e63799f6e7.jpg) + +User Please describe the image in detail. +![](images/2aa1f308a5e4d1cfb777d32372c4c543ebda75253e771dd662da3c40a4a9b1bf.jpg) +This image is a collage of various objects, foods, and animals, all of which are predominantly blue or have blue elements. The image is divided into sections labeled with letters from A to O. Here is a detailed description of each section: 1. A - Pumpkin: A blue pumpkin is depicted, which is a common symbol associated with autumn and Halloween. 2. B - Fish: A blue fish is shown, which could be a representation of a common aquatic animal. 3. C - M&M: A blue M&M candy is present.4. D - Milk and Soap: A glass of milk, a bar of soap, and a blue sponge.5. E - Bread and Jelly Beans: Blue wheat, a blue loaf of bread, and blue jelly beans.6. F - Castor Oil: A bottle of castor oil.7. G - Cheerios and Snacks: A box of Cheerios, a glass of a dark blue drink, a blue popsicle, a blue donut, and a blue muffin. 8. H - Vegetables: A collection of vegetables including a blue cucumber, green leafy vegetables, and a bunch of blue carrots.9. I - Apple and Stones: A green apple, a blue candy, and blue stones.10. J - Syrup and Candy: Sugar cubes, a bottle of blue syrup, and a blue candy bar.11. K - Coconut and Aloe: A blue coconut, a bottle of blue liquid, and a blue aloe vera plant.12. L - Pineapple and Juice: A blue pineapple, a blue banana, and a glass of blue juice.13. M - Candy Bar: A blue candy bar.14. N - Blueberries: Blueberries.15. O - Oval Object: A blue oval-shaped object. 
+ +![](images/88ca0006929491eddb21a3c71ddbb465d70cdc0896585136bc6f707c683132d0.jpg) + +![](images/7f811cf5851728d6bfd1446690b26f8821065017714d04d06116c144ae5224a7.jpg) + +![](images/5c84f7550aeaf587e504883e32f2563a103b1875b315867b44936956a2766de4.jpg) + +![](images/c3f078f825632a0a8ecfc0f90209ccb5bb4ce6fa98f6b3253c233d20f4b9b7d4.jpg) + +![](images/476bc9f3b006d0369e3d41f7a4f60b0559aa33a82c981e4c5ddb4b3946ce37ed.jpg) + +![](images/957eb719d56772c3ecd3d2e4f2915db952b9755d843d25088697d3eafffc2835.jpg) + +Table 13. SAIL demonstrates a strong capability about understanding the scientific charts. + +# Example 4: Understanding Poster Content. + +![](images/fd5dd7896f1375e93dd6ec6d4a24135dfe379dae2d45342a2a5c556982984744.jpg) +User Please describe the image in detail. + +SAIL At the top of the image, there is a decorative border with a pattern of hanging golden ornaments, which adds a festive and celebratory feel to the overall design. The background is plain white, which helps the colorful elements stand out. In the center of the image, the text "Happy Sankranti" is prominently displayed in a large, elegant, and stylized font. The text is in a warm orange color, which is traditionally associated with the festival. Below the text, there is a depiction of a woman dressed in traditional Indian attire, likely a saree, in shades of blue and green. She is engaged in the act of preparing a traditional Sankranti sweet, which is a common practice during the festival. The sweet is shaped like a pyramid and is decorated with intricate patterns, reflecting the artistic and cultural heritage of the region. To the left of the woman, there is a three-tiered pot, known as a "patala", which is a traditional Indian cooking vessel. The pot is adorned with a colorful design and is placed on a colorful circular mat, which is also a traditional element in Indian households. 
On the right side of the woman, there is a small hut with a thatched roof, which is a common architectural style in rural Indian villages. The hut is depicted in a simple and rustic manner, emphasizing the rural and traditional aspect of the scene. Overall, the image captures the essence of the Sankranti festival, highlighting the cultural and religious aspects of the celebration. The use of traditional clothing, the preparation of traditional sweets, and the depiction of a rural village scene all contribute to a sense of cultural authenticity and celebration. + +Table 14. SAIL demonstrates a strong capability about understanding the poster content. + +# References + +[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 2 +[2] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 2024. 1 +[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023.1, 2, 6 +[4] Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021.3 +[5] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Sagnak Tasirlar. Introducing our multimodal models, 2023. 2, 5, 6 +[6] Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12m: Pushing web-scale image-text pretraining to recognize long-tail visual concepts. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3558-3568, 2021. 
3 +[7] Jun Chen, Deyao Zhu, Xiaogian Shen, Xiang Li, Zechun Liu, Pengchuan Zhang, Raghuraman Krishnamoorthi, Vikas Chandra, Yunyang Xiong, and Mohamed Elhoseiny. Minigpt-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478, 2023. 2 +[8] Jieneng Chen, Qihang Yu, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Vitamin: Designing scalable vision models in the vision-language era. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3 +[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 5, 6, 10 +[10] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. In European Conference on Computer Vision, pages 19-35. Springer, 2024. 7 +[11] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. Transactions on Machine Learning Research, 2024. 1, 2, 3, 4, 5, 6, 7 +[12] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 5, 6 +[13] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing + +the gap to commercial multimodal models with open-source suites. Science China Information Sciences, 67(12):220101, 2024. 6 +[14] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 2, 3, 6 +[15] Mehdi Cherti, Romain Beaumont, Ross Wightman, Mitchell Wortsman, Gabriel Ilharco, Cade Gordon, Christoph Schuhmann, Ludwig Schmidt, and Jenia Jitsev. Reproducible scaling laws for contrastive language-image learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2818-2829, 2023. 1, 2, 3, 6, 8 +[16] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhang-hao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yong-hao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ * chatgpt quality, 2023. 1 +[17] MMSegmentation Contributors. MMSegmentation: Openmmlab semantic segmentation toolbox and benchmark. https://github.com/open-mmlab/mmsegmentation, 2020.5 +[18] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1 +[19] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3, 6 +[20] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 3, 5 +[21] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. In Advances in Neural Information Processing Systems, pages 52545-52567. Curran Associates, Inc., 2024. 
1, 2, 3, 4, 5, 6, 7 +[22] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Evev2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025. 5, 6 +[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3 + +[24] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 6 +[25] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pretraining of large autoregressive image models. arXiv preprint arXiv:2401.08541, 2024. 5 +[26] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. Eva: Exploring the limits of masked visual representation learning at scale. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 19358-19369, 2023. 3 +[27] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. Datacomp: In search of the next generation of multimodal datasets. Advances in Neural Information Processing Systems, 36:27092-27112, 2023. 3 +[28] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 8, 9, 10 +[29] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 4, 5 +[30] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024. 5, 6 +[31] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 3 +[32] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 5 +[33] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. arXiv preprint arXiv:2312.08914, 2023. 2 +[34] Xinyu Huang, Youcai Zhang, Jinyu Ma, Weiwei Tian, Rui Feng, Yuejie Zhang, Yaqian Li, Yandong Guo, and Lei + +Zhang. Tag2text: Guiding vision-language model via image tagging. arXiv preprint arXiv:2303.05657, 2023. 3 +[35] Zilong Huang, Qinghao Ye, Bingyi Kang, Jiashi Feng, and Haoqi Fan. Classification done right for vision-language pretraining. In NeurIPS, 2024. 3 +[36] Drew A Hudson and Christopher D Manning. 
Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019. 8, 9, 10 +[37] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 3 +[38] Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023. 1 +[39] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016. 5, 6, 10 +[40] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 5, 6, 8, 9, 10 +[41] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 3, 5, 6, 7 +[42] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training. In NeurIPS, 2023. 3 +[43] Xianhang Li, Haoqin Tu, Mude Hui, Zeyu Wang, Bingchen Zhao, Junfei Xiao, Sucheng Ren, Jieru Mei, Qing Liu, Huangjie Zheng, Yuyin Zhou, and Cihang Xie. What if we recapture billions of web images with llama-3? arXiv preprint arXiv:2406.08478, 2024. 4, 5 +[44] Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 6, 9, 10 +[45] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. 
Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23390-23400, 2023. 3 +[46] Zijing Liang, Yanjie Xu, Yifan Hong, Penghui Shang, Qi Wang, Qiang Fu, and Ke Liu. A survey of multimodel large language models. In Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, pages 405-409, 2024. 5, 6, 8, 9 +[47] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 6, 7, 9, 10 + +[48] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava next: Improved reasoning,OCR, and world knowledge, 2024.3,6 +[49] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 1, 2, 3 +[50] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2024. 5, 6, 8, 9, 10 +[51] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024. 5, 6 +[52] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 5, 6, 8, 9, 10 +[53] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. 
Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5, 6 +[54] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jiawen Liu, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the boundaries of monolithic multimodal large language models with endogenous visual pre-training. arXiv preprint arXiv:2410.08202, 2024. 1, 2, 3, 5, 6 +[55] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 10 +[56] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 10 +[57] Sachin Mehta, Maxwell Horton, Fartash Faghri, Mohammad Hossein Sekhavat, Mahyar Najibi, Mehrdad Farajtabar, Oncel Tuzel, and Mohammad Rastegari. Catlip: Clipsevel visual recognition accuracy with 2.7 x faster pretraining on web-scale image-text data. arXiv preprint arXiv:2404.15653, 2024. 3 +[58] Meta. Introducing meta llama 3: The most capable openly available llm to date, 2024. Accessed: 2024-04-18. 1 +[59] OpenAI. Introducing chatgpt. OpenAI Blog, 2021. +[60] OpenAI. Gpt-4 technical report, 2023. 1 +[61] OpenAI. Gpt-4v(ision) system card, 2023. 1, 2 +[62] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 1, 2, 3, 6, 8 + +[63] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. LAION-5B: An open large-scale dataset for training next generation image-text models. In NeurlPS, 2022. 
3, 4, 5 +[64] Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatronlm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019. 5 +[65] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 5, 6, 8, 9, 10 +[66] Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama. https://cerebras.ai/blog/slimpajama-a-627b-token-cleaned-and-deduplicated-version-of-redpajama, 2023.4.5 +[67] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 2, 6 +[68] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 2 +[69] Peter Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Adithya Jairam Vedagiri IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. Advances in Neural Information Processing Systems, 37:87310-87356, 2024. 6 +[70] Shengbang Tong, Zhuang Liu, Yuexiang Zhai, Yi Ma, Yann LeCun, and Saining Xie. Eyes wide shut? exploring the visual shortcomings of multimodal llms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9568-9578, 2024. 9 +[71] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 
3, 8 +[72] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 6 +[73] Haochen Wang, Junsong Fan, Yuxi Wang, Kaiyou Song, Tiancai Wang, Xiangyu Zhang, and Zhaoxiang Zhang. Bootstrap masked visual modeling via hard patches mining. arXiv preprint arXiv:2312.13714, 2023. 5 +[74] Haochen Wang, Kaiyou Song, Junsong Fan, Yuxi Wang, Jin Xie, and Zhaoxiang Zhang. Hard patches mining for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10375-10385, 2023. 5 + +[75] Haochen Wang, Anlin Zheng, Yucheng Zhao, Tiancai Wang, Ge Zheng, Xiangyu Zhang, and Zhaoxiang Zhang. Reconstructive visual instruction tuning. In International Conference on Learning Representations, 2025. 2 +[76] Jiacong Wang, Bohong Wu, Haiyong Jiang, Zhou Xun, Xin Xiao, Haoyuan Guo, and Jun Xiao. World to code: Multimodal data generation via self-instructed compositional captioning and filtering. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4608-4623, 2024. 1 +[77] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 1, 2, 3, 7 +[78] Wenxuan Wang, Quan Sun, Fan Zhang, Yepeng Tang, Jing Liu, and Xinlong Wang. Diffusion feedback helps clip see better. arXiv preprint arXiv:2407.20171, 2024. 3 +[79] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 
2, 3, 6 +[80] Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. SimVLM: Simple visual language model pretraining with weak supervision. In International Conference on Learning Representations, 2022. 3 +[81] X.ai. Grok-1.5 vision preview. https://x.ai/blog/grok-1.5v, 2024.5, 6 +[82] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 5, 6 +[83] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 3 +[84] Qiying Yu, Quan Sun, Xiaosong Zhang, Yufeng Cui, Fan Zhang, Yue Cao, Xinlong Wang, and Jingjing Liu. Capsfusion: Rethinking image-text data at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14022-14032, 2024. 4, 5 +[85] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 5, 6 +[86] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? arXiv preprint arXiv:2210.01936, 2022. 5, 6 +[87] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 3, 7, 8 +[88] Tao Zhang, Xiangtai Li, Zilong Huang, Yanwei Li, Weixian Lei, Xueqing Deng, Shihao Chen, Shunping Ji, and Jiashi + +Feng. Pixel-sail: Single transformer for pixel-grounded understanding. arXiv, 2025. 
6 +[89] Youcai Zhang, Xinyu Huang, Jinyu Ma, Zhaoyang Li, Zhaochuan Luo, Yanchun Xie, Yuzhuo Qin, Tong Luo, Yaqian Li, Shilong Liu, et al. Recognize anything: A strong image tagging model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1724-1732, 2024. 3 +[90] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 5 +[91] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 1, 2, 3 \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10462/images/02d4e6e0e828472c97f4452200369a3fa7e8e4ab06ccd32a8e320982bf2f5130.jpg b/data/2025/2504_10xxx/2504.10462/images/02d4e6e0e828472c97f4452200369a3fa7e8e4ab06ccd32a8e320982bf2f5130.jpg new file mode 100644 index 0000000000000000000000000000000000000000..67f48f8cf6ccd531d2579106abc7320d39edb95d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/02d4e6e0e828472c97f4452200369a3fa7e8e4ab06ccd32a8e320982bf2f5130.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9172b812ab8735603db613d0ce3669b023f84e363dc6386a021bb28526840617 +size 60423 diff --git a/data/2025/2504_10xxx/2504.10462/images/08e984e5b7eac62672319f52aa00b0a3fb3f0b7e9bc754dbd7bad2e6f2348257.jpg b/data/2025/2504_10xxx/2504.10462/images/08e984e5b7eac62672319f52aa00b0a3fb3f0b7e9bc754dbd7bad2e6f2348257.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbbe37fd6a0982547100c42bc18297da8cb99b69 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/08e984e5b7eac62672319f52aa00b0a3fb3f0b7e9bc754dbd7bad2e6f2348257.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f844d46c83ce5c2f8b342cd5a665004ef88b178134931222af9b4f1a30a5e58 
+size 8554 diff --git a/data/2025/2504_10xxx/2504.10462/images/0c8b865553e262229522d49de1c266ad4d74ff2efba965f1aceb91bc26705e7b.jpg b/data/2025/2504_10xxx/2504.10462/images/0c8b865553e262229522d49de1c266ad4d74ff2efba965f1aceb91bc26705e7b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cdc4bbc0e61e6ae7a3fe94cb2a42011310e47254 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/0c8b865553e262229522d49de1c266ad4d74ff2efba965f1aceb91bc26705e7b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06c46f3139edd6679a6a5194bb3cecec926e605895c9fa8a4151b7bb57430f5b +size 86040 diff --git a/data/2025/2504_10xxx/2504.10462/images/0cb729d503de7c017da687d7d77c3458500e81ea0c18677e9175ecc68a34137c.jpg b/data/2025/2504_10xxx/2504.10462/images/0cb729d503de7c017da687d7d77c3458500e81ea0c18677e9175ecc68a34137c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..876f2ecfa97f5593234e57e3f5a76028c8cc9712 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/0cb729d503de7c017da687d7d77c3458500e81ea0c18677e9175ecc68a34137c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1b7d0bd4d6e4e6bbf97aac14b1927f7b4112394b78aefafaaa1d24a06695f36 +size 147062 diff --git a/data/2025/2504_10xxx/2504.10462/images/14245205aa410e48bd6a91506bad91fa760f5c60d89237bfedd1e75ea248e219.jpg b/data/2025/2504_10xxx/2504.10462/images/14245205aa410e48bd6a91506bad91fa760f5c60d89237bfedd1e75ea248e219.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f30e1825e45e1d1eb90bc6947c0d9c95c8d150f5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/14245205aa410e48bd6a91506bad91fa760f5c60d89237bfedd1e75ea248e219.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6024e0be1865861d5edb0b939bf086a469963f50109781d7a0326eccddbace71 +size 32430 diff --git a/data/2025/2504_10xxx/2504.10462/images/20f7c01d41fb79fbaccfe67a2b0667fe76827939bc0931c034efc63a00dd6df2.jpg 
b/data/2025/2504_10xxx/2504.10462/images/20f7c01d41fb79fbaccfe67a2b0667fe76827939bc0931c034efc63a00dd6df2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b89eb6acf5778315340176f3f2e34b87b43e21fd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/20f7c01d41fb79fbaccfe67a2b0667fe76827939bc0931c034efc63a00dd6df2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5e37ca02fe417910d378f862c9bb4e123e5df4093fd2b94e2c7c430e2e4f23d +size 53245 diff --git a/data/2025/2504_10xxx/2504.10462/images/23f836472eb6724001df347ec8381d670f86dae4957d99f0de1396cfaa1bc7f7.jpg b/data/2025/2504_10xxx/2504.10462/images/23f836472eb6724001df347ec8381d670f86dae4957d99f0de1396cfaa1bc7f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edbcf59ec30cbed1f31b5c34d8b2c41e0d63ca8d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/23f836472eb6724001df347ec8381d670f86dae4957d99f0de1396cfaa1bc7f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb75f1a3fc2a13cc82afc433499143bb6c89d467630f3b2afb954f5f7f3f890a +size 41050 diff --git a/data/2025/2504_10xxx/2504.10462/images/2aa1f308a5e4d1cfb777d32372c4c543ebda75253e771dd662da3c40a4a9b1bf.jpg b/data/2025/2504_10xxx/2504.10462/images/2aa1f308a5e4d1cfb777d32372c4c543ebda75253e771dd662da3c40a4a9b1bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c058a6a307555f118cf4120bc3605fa050c4c05e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/2aa1f308a5e4d1cfb777d32372c4c543ebda75253e771dd662da3c40a4a9b1bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:984063fd932dd5222e0679e0b130bdc29b45806820e58c5786d8053209a20ab3 +size 10018 diff --git a/data/2025/2504_10xxx/2504.10462/images/35b227d6c52d257cd44604f0355dc61261e44c4898ffbe3227dfe22da29d8176.jpg b/data/2025/2504_10xxx/2504.10462/images/35b227d6c52d257cd44604f0355dc61261e44c4898ffbe3227dfe22da29d8176.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..48c9a4306d3d944fb7b857364ead058448b4d76b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/35b227d6c52d257cd44604f0355dc61261e44c4898ffbe3227dfe22da29d8176.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe878fcfacd86948c332bd097590d220774f276d5e67272829c8f408e24b3b2a +size 26391 diff --git a/data/2025/2504_10xxx/2504.10462/images/3bb96e8beba6754511e44dc43585c34466a7f1e614f055714a6d3c6d31caaa15.jpg b/data/2025/2504_10xxx/2504.10462/images/3bb96e8beba6754511e44dc43585c34466a7f1e614f055714a6d3c6d31caaa15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17b843217eb14858f9c096a86b97fc0a8897dca2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/3bb96e8beba6754511e44dc43585c34466a7f1e614f055714a6d3c6d31caaa15.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24c91b196552c9b6c84ff6c55155fb3d5231ce2dfba4cb82fa54806c9efca869 +size 22594 diff --git a/data/2025/2504_10xxx/2504.10462/images/42cf6443894186feb0a1d9dae9b8337f49f0ae9fad351b32d6fc399fbad551af.jpg b/data/2025/2504_10xxx/2504.10462/images/42cf6443894186feb0a1d9dae9b8337f49f0ae9fad351b32d6fc399fbad551af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efa024d7dbd3a4b3c4637057226f49b1b8f055f4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/42cf6443894186feb0a1d9dae9b8337f49f0ae9fad351b32d6fc399fbad551af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b625687c8924b4c19fbdabea6b4dd04f3c8f5094aaca4b663e416ed365695050 +size 70290 diff --git a/data/2025/2504_10xxx/2504.10462/images/45ceef6727c7a3c12e1dcef96d22949a055822dcf3dcdc91bdcc1b96cfdab9b9.jpg b/data/2025/2504_10xxx/2504.10462/images/45ceef6727c7a3c12e1dcef96d22949a055822dcf3dcdc91bdcc1b96cfdab9b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25a1ccea0fe3a18c244c7ee0cc570855b474507a --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10462/images/45ceef6727c7a3c12e1dcef96d22949a055822dcf3dcdc91bdcc1b96cfdab9b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6d1cbfebbc4c2d47b2c5264260d54b5270cac96c8b6f4ee7599af641800f309 +size 24260 diff --git a/data/2025/2504_10xxx/2504.10462/images/476bc9f3b006d0369e3d41f7a4f60b0559aa33a82c981e4c5ddb4b3946ce37ed.jpg b/data/2025/2504_10xxx/2504.10462/images/476bc9f3b006d0369e3d41f7a4f60b0559aa33a82c981e4c5ddb4b3946ce37ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c055a4849233be59c9de103af6e93eff2020d645 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/476bc9f3b006d0369e3d41f7a4f60b0559aa33a82c981e4c5ddb4b3946ce37ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e63edfd4dafeaf60a2be46307d07dab8896d1b86c545cfe62bb34b52dca147a +size 9566 diff --git a/data/2025/2504_10xxx/2504.10462/images/5c84f7550aeaf587e504883e32f2563a103b1875b315867b44936956a2766de4.jpg b/data/2025/2504_10xxx/2504.10462/images/5c84f7550aeaf587e504883e32f2563a103b1875b315867b44936956a2766de4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6bc54a2e4a1c9875636fe38a1c46b73c8b2be7a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/5c84f7550aeaf587e504883e32f2563a103b1875b315867b44936956a2766de4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:498dfe6797f253327cc1b5ca3649cb231f7c0e1378fe390723a6fd6d00ca43c6 +size 6256 diff --git a/data/2025/2504_10xxx/2504.10462/images/5cf82c40e2027f2eec837df6873535efaf1d21cde37f0251e7f842f4a80e5417.jpg b/data/2025/2504_10xxx/2504.10462/images/5cf82c40e2027f2eec837df6873535efaf1d21cde37f0251e7f842f4a80e5417.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28592fdfefc48adc0d92f30da42f5585dca3fcc4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/5cf82c40e2027f2eec837df6873535efaf1d21cde37f0251e7f842f4a80e5417.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d5ebdeb5b48b65d06ca3ca31a7c98607098880ea84c4728ba7ee3bbb97f9193b +size 10083 diff --git a/data/2025/2504_10xxx/2504.10462/images/61e34a4823cfcf7abfbd05b2e685db45e3739a868869559e15d3920fe25b1d5f.jpg b/data/2025/2504_10xxx/2504.10462/images/61e34a4823cfcf7abfbd05b2e685db45e3739a868869559e15d3920fe25b1d5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd3bfce01e4fb549ab49b85ed67e0c209f99ade5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/61e34a4823cfcf7abfbd05b2e685db45e3739a868869559e15d3920fe25b1d5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34a502a6f992efcc3bd29beabf7a6cb3c43da59e1e895f0bdc5e4a3181d69fe4 +size 5874 diff --git a/data/2025/2504_10xxx/2504.10462/images/63cbc17f7f3795c8081971b405697dd160c73dc06136bdfd24067ac6eaeeb4cf.jpg b/data/2025/2504_10xxx/2504.10462/images/63cbc17f7f3795c8081971b405697dd160c73dc06136bdfd24067ac6eaeeb4cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7976397e359036b7e32443c41119af7688820b2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/63cbc17f7f3795c8081971b405697dd160c73dc06136bdfd24067ac6eaeeb4cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7651f713863f5e4f262509e4ba7dee21ed1ac52e06dea70e42d0e5e3237ab5b +size 30119 diff --git a/data/2025/2504_10xxx/2504.10462/images/6e114755f6e4fbd24b51e09e065d2fd1a5dd224fab94ab7cfcd565e63799f6e7.jpg b/data/2025/2504_10xxx/2504.10462/images/6e114755f6e4fbd24b51e09e065d2fd1a5dd224fab94ab7cfcd565e63799f6e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f1032c329bfd695924db6ad7dee5e1250ddd594 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/6e114755f6e4fbd24b51e09e065d2fd1a5dd224fab94ab7cfcd565e63799f6e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49eaccc4b74ff0d418a70dc8da2913ece50b3cf081d379d785505a3900106921 +size 3761 diff --git 
a/data/2025/2504_10xxx/2504.10462/images/72403d5df5d4cd1878d5e33c5bd4d9fa9dd848225e493287b525c8fda5a85e1d.jpg b/data/2025/2504_10xxx/2504.10462/images/72403d5df5d4cd1878d5e33c5bd4d9fa9dd848225e493287b525c8fda5a85e1d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d758f0cf5cb7699b7169dd6361a53d4954a185b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/72403d5df5d4cd1878d5e33c5bd4d9fa9dd848225e493287b525c8fda5a85e1d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4f449acf61e29027878993d013922bcf2b1eba0ca9cb582c4f8ff4a6f70485f +size 18244 diff --git a/data/2025/2504_10xxx/2504.10462/images/74ee2a817ef3268b95e72eeb8f2fb43f406463207bb7cde5d1a436e7ac6a0bd7.jpg b/data/2025/2504_10xxx/2504.10462/images/74ee2a817ef3268b95e72eeb8f2fb43f406463207bb7cde5d1a436e7ac6a0bd7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0975cc896c6792381a74c4fba6fa240f93008c1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/74ee2a817ef3268b95e72eeb8f2fb43f406463207bb7cde5d1a436e7ac6a0bd7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:687b381080e6515d88cd6a42c553fcae33b334ed75887b95c370b92cb27065c0 +size 16349 diff --git a/data/2025/2504_10xxx/2504.10462/images/7637efd9ee2d43b4d77ee2d1c219080912e7103759b698c9370945ebd8bfd6db.jpg b/data/2025/2504_10xxx/2504.10462/images/7637efd9ee2d43b4d77ee2d1c219080912e7103759b698c9370945ebd8bfd6db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a0f98c3c090d76833d824bf2f988c61f12309f0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/7637efd9ee2d43b4d77ee2d1c219080912e7103759b698c9370945ebd8bfd6db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:558a5785b8ce6c050e2e5010687cb6682d0909fc40380f5c73663460cba802e1 +size 9260 diff --git a/data/2025/2504_10xxx/2504.10462/images/78c033c10d95a55f2ce039c403daf56a8cb6a605be2c6ec2a69fb139cd0433f2.jpg 
b/data/2025/2504_10xxx/2504.10462/images/78c033c10d95a55f2ce039c403daf56a8cb6a605be2c6ec2a69fb139cd0433f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a19de6c1e83f5e234ac2ef1b5a46fd6b8898709 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/78c033c10d95a55f2ce039c403daf56a8cb6a605be2c6ec2a69fb139cd0433f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19d995c235421a3333cc7e96a06d44d16fbcab39db1c3465c4fde51b485596e9 +size 38942 diff --git a/data/2025/2504_10xxx/2504.10462/images/7f811cf5851728d6bfd1446690b26f8821065017714d04d06116c144ae5224a7.jpg b/data/2025/2504_10xxx/2504.10462/images/7f811cf5851728d6bfd1446690b26f8821065017714d04d06116c144ae5224a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..974574965e9189669afdc05bee5417d75ff7b996 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/7f811cf5851728d6bfd1446690b26f8821065017714d04d06116c144ae5224a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6283e1b744d9ee7ca4c4934307e63ddeda179b148cad06016342b5a5fde028aa +size 13548 diff --git a/data/2025/2504_10xxx/2504.10462/images/86ea22e9302f393c6faa266a08d690c1d93155388ab664b38df9f56bb5bac5f9.jpg b/data/2025/2504_10xxx/2504.10462/images/86ea22e9302f393c6faa266a08d690c1d93155388ab664b38df9f56bb5bac5f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96d37bdc9771b62ba3c9e52920db31e7daa89e3f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/86ea22e9302f393c6faa266a08d690c1d93155388ab664b38df9f56bb5bac5f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ace5663b7d1b87ab4c7790de9c3da4f2066205f1a9c30e0372d0ed05ef639824 +size 49851 diff --git a/data/2025/2504_10xxx/2504.10462/images/87aacb4266bb5df777e2e46d47cbe6631fbba6a4cecc29791d9ff98e59ada6b7.jpg b/data/2025/2504_10xxx/2504.10462/images/87aacb4266bb5df777e2e46d47cbe6631fbba6a4cecc29791d9ff98e59ada6b7.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a422f09beb942b6e41a946fafc987383acd73702 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/87aacb4266bb5df777e2e46d47cbe6631fbba6a4cecc29791d9ff98e59ada6b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d95dda71edc8eff2f26eef7879d4ff48dbc992b1e723b37ef9977bc62ef040b4 +size 14034 diff --git a/data/2025/2504_10xxx/2504.10462/images/88ca0006929491eddb21a3c71ddbb465d70cdc0896585136bc6f707c683132d0.jpg b/data/2025/2504_10xxx/2504.10462/images/88ca0006929491eddb21a3c71ddbb465d70cdc0896585136bc6f707c683132d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7aba274f16573b691bcb1728c858798c6ab6055 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/88ca0006929491eddb21a3c71ddbb465d70cdc0896585136bc6f707c683132d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c8f3d3ab463806d62743efd07f95283e766329bbeda3faa86b5e10f968ff917 +size 8646 diff --git a/data/2025/2504_10xxx/2504.10462/images/9064d8a14fd89c407062d6f65d9550a3b3b10c965b254fe31126fd6726721b04.jpg b/data/2025/2504_10xxx/2504.10462/images/9064d8a14fd89c407062d6f65d9550a3b3b10c965b254fe31126fd6726721b04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..796266b5aa27467d6d3b88b3d3346822ae695415 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/9064d8a14fd89c407062d6f65d9550a3b3b10c965b254fe31126fd6726721b04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc0fd7bede917cc347bbf73311ab847b4a1e4f4efbdefa7626cfb0e7448ef312 +size 9604 diff --git a/data/2025/2504_10xxx/2504.10462/images/94d095fde53983aad3eb8a4956fd06eba46ff9fbee4031253bb8f8ccb00cc867.jpg b/data/2025/2504_10xxx/2504.10462/images/94d095fde53983aad3eb8a4956fd06eba46ff9fbee4031253bb8f8ccb00cc867.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fec866084b445091a07f30e1917abb6df378b23d --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10462/images/94d095fde53983aad3eb8a4956fd06eba46ff9fbee4031253bb8f8ccb00cc867.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24fdc275e0b67c1adbd6b33d5f510d688ceb1a380899616475a354db0618396e +size 24998 diff --git a/data/2025/2504_10xxx/2504.10462/images/957eb719d56772c3ecd3d2e4f2915db952b9755d843d25088697d3eafffc2835.jpg b/data/2025/2504_10xxx/2504.10462/images/957eb719d56772c3ecd3d2e4f2915db952b9755d843d25088697d3eafffc2835.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bae2af6018c18c4a219a2bd688797a14da260d65 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/957eb719d56772c3ecd3d2e4f2915db952b9755d843d25088697d3eafffc2835.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f32374756d3fff4ae7ecb290821231e7452db0530df745e4ec675c0757d009f +size 9212 diff --git a/data/2025/2504_10xxx/2504.10462/images/a1c62c760920c7d865b90215f1f47dbfe4133eedf1aaf8f9b80698512acfbfef.jpg b/data/2025/2504_10xxx/2504.10462/images/a1c62c760920c7d865b90215f1f47dbfe4133eedf1aaf8f9b80698512acfbfef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d336cec23909a8b38ffb6454213f80204dde193e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/a1c62c760920c7d865b90215f1f47dbfe4133eedf1aaf8f9b80698512acfbfef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebeba3c4e286ae0cd9efc8cd79ec6be8e6071e7228d7efadddc388c5d034a11e +size 5281 diff --git a/data/2025/2504_10xxx/2504.10462/images/b1c4ad969f48db6a7414b4fd603874024b5c5b9bced4b811a8e22034583fe0c3.jpg b/data/2025/2504_10xxx/2504.10462/images/b1c4ad969f48db6a7414b4fd603874024b5c5b9bced4b811a8e22034583fe0c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f38cfb4780842f04877f6e7511a0a8f93258f03 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/b1c4ad969f48db6a7414b4fd603874024b5c5b9bced4b811a8e22034583fe0c3.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5261896f2703e83fe17b04e91528fb52169430601d33f89d3e2d4c8d7f69fbc8 +size 28610 diff --git a/data/2025/2504_10xxx/2504.10462/images/b962b4ccb763cd14712f5e425147173100dfb6ec6faff3b2f4df0162cf8345b5.jpg b/data/2025/2504_10xxx/2504.10462/images/b962b4ccb763cd14712f5e425147173100dfb6ec6faff3b2f4df0162cf8345b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3b6635471921528efa004edd21a26661e0fbf01 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/b962b4ccb763cd14712f5e425147173100dfb6ec6faff3b2f4df0162cf8345b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc03938d107f7a169d56404f6f40fa80b4863c2e420f12ab44179905cab3a49b +size 36291 diff --git a/data/2025/2504_10xxx/2504.10462/images/c3f078f825632a0a8ecfc0f90209ccb5bb4ce6fa98f6b3253c233d20f4b9b7d4.jpg b/data/2025/2504_10xxx/2504.10462/images/c3f078f825632a0a8ecfc0f90209ccb5bb4ce6fa98f6b3253c233d20f4b9b7d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aca6c43309903f8ebcb666ea5893a1b52f190f7c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/c3f078f825632a0a8ecfc0f90209ccb5bb4ce6fa98f6b3253c233d20f4b9b7d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bdcff459bab90a04d47070ba106cfaee68e1e5c6603fdb3c7b03bf6289966f9 +size 5436 diff --git a/data/2025/2504_10xxx/2504.10462/images/c743f0fe8f49bafa26967c3602456ccc131b4c41de5f8eefbc16ca5bcc4d4e12.jpg b/data/2025/2504_10xxx/2504.10462/images/c743f0fe8f49bafa26967c3602456ccc131b4c41de5f8eefbc16ca5bcc4d4e12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac2fd942277eb8189825f7e9ade2adeff1f331d9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/c743f0fe8f49bafa26967c3602456ccc131b4c41de5f8eefbc16ca5bcc4d4e12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2768fef7f76bd288e31e358f8054bbd0015e76c4c7d3f29b5d420118f0c7c0d +size 11567 diff --git 
a/data/2025/2504_10xxx/2504.10462/images/ca7b4d2d7629b3566f0d32cb2b1bd1e264daec4f37ae471c87d0ab3de85774dd.jpg b/data/2025/2504_10xxx/2504.10462/images/ca7b4d2d7629b3566f0d32cb2b1bd1e264daec4f37ae471c87d0ab3de85774dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd1b5cbf9f67c3c06ca476971534ec48124a0cb9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/ca7b4d2d7629b3566f0d32cb2b1bd1e264daec4f37ae471c87d0ab3de85774dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f829d999eb6144efd30e2102eaf9894311487b39409ca253cdc3356cec5dcedf +size 14519 diff --git a/data/2025/2504_10xxx/2504.10462/images/cbf681ad40eed4080496a59ec1cab9482e6085a739b47fb697bc4e5fa99eabde.jpg b/data/2025/2504_10xxx/2504.10462/images/cbf681ad40eed4080496a59ec1cab9482e6085a739b47fb697bc4e5fa99eabde.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a62a9000f29d2bd25af440bb5bbd9c1d78021b09 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/cbf681ad40eed4080496a59ec1cab9482e6085a739b47fb697bc4e5fa99eabde.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bac0c38e76dca248c45a3d611e2394393854b82dfe8e01b588475d30fd42838 +size 61102 diff --git a/data/2025/2504_10xxx/2504.10462/images/d4d1f138be91e3ae0a2bdd84b923df543b8646ec19b28b01a2a51525de73dedb.jpg b/data/2025/2504_10xxx/2504.10462/images/d4d1f138be91e3ae0a2bdd84b923df543b8646ec19b28b01a2a51525de73dedb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2542b062980c77c9413760f83ba204c703762b61 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/d4d1f138be91e3ae0a2bdd84b923df543b8646ec19b28b01a2a51525de73dedb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90eccfa74f6d9e2c267204e9925901f8c7d73593a36bd805a0b70b994a49dead +size 16630 diff --git a/data/2025/2504_10xxx/2504.10462/images/d6cdb7ea2a12e15ebee32a5cc29d4041d12e9a7a69f66eeb92347e9b7ed99910.jpg 
b/data/2025/2504_10xxx/2504.10462/images/d6cdb7ea2a12e15ebee32a5cc29d4041d12e9a7a69f66eeb92347e9b7ed99910.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82528f1956781d24b8e27b2e0d59d16996c2be5a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/d6cdb7ea2a12e15ebee32a5cc29d4041d12e9a7a69f66eeb92347e9b7ed99910.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab08759ff40723f39251c87677ee6b7bfdcb49b1baf8435f712d9f3eaa399555 +size 5061 diff --git a/data/2025/2504_10xxx/2504.10462/images/d74c79b2594ba8ce53efc20e4751a99dfed385f78b166c083f55da888145dbd3.jpg b/data/2025/2504_10xxx/2504.10462/images/d74c79b2594ba8ce53efc20e4751a99dfed385f78b166c083f55da888145dbd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8be1c294d457d2c61a8550fec37d0aa417d25de8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/d74c79b2594ba8ce53efc20e4751a99dfed385f78b166c083f55da888145dbd3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ffb72c405003e58379dfc5a3bfe6537697aa48c2b881b6835998e13742d2a51 +size 9982 diff --git a/data/2025/2504_10xxx/2504.10462/images/e8f37e8f122c2c39ea9d4d546ac139f8ce9719c6c97ce52462f5ab50d470db13.jpg b/data/2025/2504_10xxx/2504.10462/images/e8f37e8f122c2c39ea9d4d546ac139f8ce9719c6c97ce52462f5ab50d470db13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18d3513fcd9637e8b3c2346543bc715740600c6c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/e8f37e8f122c2c39ea9d4d546ac139f8ce9719c6c97ce52462f5ab50d470db13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c665c65faf6cd877f4d0e038118d2270a7f938a59a6c3393894441c04011dad +size 11708 diff --git a/data/2025/2504_10xxx/2504.10462/images/eb6a28a28a20b99c926f16b7385ebc1e417e5d659893ab587113a8b6f7597b5d.jpg b/data/2025/2504_10xxx/2504.10462/images/eb6a28a28a20b99c926f16b7385ebc1e417e5d659893ab587113a8b6f7597b5d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..09bb3003c02e84a07be0e49654014445cb05c393 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/eb6a28a28a20b99c926f16b7385ebc1e417e5d659893ab587113a8b6f7597b5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa72ffc14e4abcbb580df10400471848316c56c81e4cfacd80048df450d4ee05 +size 7899 diff --git a/data/2025/2504_10xxx/2504.10462/images/f93ce8b5d07f651bb28ff57f3b09ece10f95d308a8933e9753d6ce14a5c1df50.jpg b/data/2025/2504_10xxx/2504.10462/images/f93ce8b5d07f651bb28ff57f3b09ece10f95d308a8933e9753d6ce14a5c1df50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56d524473ff13a86f7ab838aa23f1d0f3e3f8579 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/f93ce8b5d07f651bb28ff57f3b09ece10f95d308a8933e9753d6ce14a5c1df50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23899919ee2aa6a72c22bb02b4756a6d8c61ddb22d47fd8e5777fa02af64936c +size 37711 diff --git a/data/2025/2504_10xxx/2504.10462/images/fd5dd7896f1375e93dd6ec6d4a24135dfe379dae2d45342a2a5c556982984744.jpg b/data/2025/2504_10xxx/2504.10462/images/fd5dd7896f1375e93dd6ec6d4a24135dfe379dae2d45342a2a5c556982984744.jpg new file mode 100644 index 0000000000000000000000000000000000000000..135b9d5d4dbc5b54d5b89e76540a66c2a718ed62 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/images/fd5dd7896f1375e93dd6ec6d4a24135dfe379dae2d45342a2a5c556982984744.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:732bf7f7445285d32970c9dcfb3bbd3abd9a078deb6376fb8593e81e148fcf99 +size 54338 diff --git a/data/2025/2504_10xxx/2504.10462/layout.json b/data/2025/2504_10xxx/2504.10462/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ba329bd88807155ae453d6b7a465f4b80d420934 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10462/layout.json @@ -0,0 +1,11956 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 71, + 103, + 539, + 140 + ], + "type": "title", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 103, + 539, + 140 + ], + "spans": [ + { + "bbox": [ + 71, + 103, + 539, + 140 + ], + "type": "text", + "content": "The Scalability of Simplicity: Empirical Analysis of Vision-Language Learning with a Single Transformer" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 162, + 160, + 447, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 160, + 447, + 217 + ], + "spans": [ + { + "bbox": [ + 162, + 160, + 447, + 217 + ], + "type": "text", + "content": "Weixian Lei* Jiacong Wang* Haochen Wang* \nXiangtai Li Jun Hao Liew Jiashi Feng Zilong Huang† \n*Equal contribution, † Project Lead \nBytedance Seed" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 151, + 244, + 200, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 244, + 200, + 257 + ], + "spans": [ + { + "bbox": [ + 151, + 244, + 200, + 257 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 270, + 297, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 270, + 297, + 533 + ], + "spans": [ + { + "bbox": [ + 53, + 270, + 297, + 533 + ], + "type": "text", + "content": "This paper introduces SAIL, a single transformer unified multimodal large language model (MLLM) that integrates raw pixel encoding and language decoding within a singular architecture. Unlike existing modular MLLMs, which rely on a pre-trained vision transformer (ViT), SAIL eliminates the need for a separate vision encoder, presenting a more minimalist architecture design. Instead of introducing novel architectural components, SAIL adapts mix-attention mechanisms and multimodal positional encodings to better align with the distinct characteristics of visual and textual modalities. We systematically compare SAIL's properties—including scalability, cross-modal information flow patterns, and visual representation capabilities—with those of modular MLLMs. 
By scaling both training data and model size, SAIL achieves performance comparable to modular MLLMs. Notably, the removal of pretrained ViT components enhances SAIL's scalability and results in significantly different cross-modal information flow patterns. Moreover, SAIL demonstrates strong visual representation capabilities, achieving results on par with ViT-22B in vision tasks such as semantic segmentation. Code and models are available1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 555, + 135, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 555, + 135, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 555, + 135, + 567 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 575, + 296, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 575, + 296, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 575, + 296, + 696 + ], + "type": "text", + "content": "The pursuit of multimodal intelligence has driven the development of Multimodal Large Language Models (MLLMs) [49, 61, 68], which typically adopt a modular design: a pre-trained vision encoder (e.g., CLIPViT [15, 62]) extracts image features, a Large Language Model (LLM) [2, 16, 38, 58-60] processes text, and a lightweight projector aligns the two modalities. This framework achieves strong performance through multi-stage pretraining, supervised fine-tuning (SFT), and post-training on multimodal datasets [3, 18, 49, 76, 77, 91]. 
While effective," + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 315, + 251, + 437, + 363 + ], + "blocks": [ + { + "bbox": [ + 315, + 251, + 437, + 363 + ], + "lines": [ + { + "bbox": [ + 315, + 251, + 437, + 363 + ], + "spans": [ + { + "bbox": [ + 315, + 251, + 437, + 363 + ], + "type": "image", + "image_path": "87aacb4266bb5df777e2e46d47cbe6631fbba6a4cecc29791d9ff98e59ada6b7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 376, + 366, + 389, + 376 + ], + "lines": [ + { + "bbox": [ + 376, + 366, + 389, + 376 + ], + "spans": [ + { + "bbox": [ + 376, + 366, + 389, + 376 + ], + "type": "text", + "content": "(A)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 384, + 555, + 462 + ], + "lines": [ + { + "bbox": [ + 313, + 384, + 555, + 462 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 555, + 462 + ], + "type": "text", + "content": "Figure 1. (A) Data scaling curve for Modular Multimodal Large Language Model (MLLM) and SAIL, our Single Transformer-based MLLM. As pretraining data increases, the single transformer SAIL shows a sharper performance gain, demonstrating its superior data scalability. (B) Comparison to existing Single Transformer-based MLLMs: our SAIL pushes the performance boundaries on both vision tasks and vision-language tasks." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 438, + 251, + 553, + 363 + ], + "blocks": [ + { + "bbox": [ + 438, + 251, + 553, + 363 + ], + "lines": [ + { + "bbox": [ + 438, + 251, + 553, + 363 + ], + "spans": [ + { + "bbox": [ + 438, + 251, + 553, + 363 + ], + "type": "image", + "image_path": "74ee2a817ef3268b95e72eeb8f2fb43f406463207bb7cde5d1a436e7ac6a0bd7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 492, + 366, + 504, + 376 + ], + "lines": [ + { + "bbox": [ + 492, + 366, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 492, + 366, + 504, + 376 + ], + "type": "text", + "content": "(B)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 471, + 554, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 471, + 554, + 519 + ], + "spans": [ + { + "bbox": [ + 313, + 471, + 554, + 519 + ], + "type": "text", + "content": "this modular MLLM paradigm inherently fragments multimodal processing, reinforces reliance on pretrained visual encoders, which may limit deployment flexibility and scalability [11, 21, 54]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 312, + 520, + 555, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 520, + 555, + 675 + ], + "spans": [ + { + "bbox": [ + 312, + 520, + 555, + 675 + ], + "type": "text", + "content": "A promising alternative is to eliminate the visual encoder entirely and process raw image patches and text tokens within a single Transformer. This unified architecture removes modality-specific modules, enabling parameter sharing and end-to-end learning of vision-language interactions. Previous works [11, 21, 54] have primarily explored the architecture design, training data, and methods of Single Transformer-based MLLMs. 
However, little exploration has been given to their fundamental properties, such as scalability, cross-modal information flow patterns, and visual representation capabilities. A deeper understanding of these properties is crucial for unlocking the full potential of Single Transformer-based MLLMs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 677, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 556, + 713 + ], + "type": "text", + "content": "In this work, we present an experimental analysis of the fundamental properties of Single Transformer-based MLLMs and compare them to modular MLLMs (e.g." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 55, + 70, + 194, + 87 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 70, + 194, + 87 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 194, + 87 + ], + "type": "text", + "content": "ByteDance | Seed" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.10462v1 [cs.CV] 14 Apr 2025" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "type": "text", + "content": "1https://github.com/bytedance/SAIL" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, 
+ 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 91, + 294, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 294, + 149 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 294, + 149 + ], + "type": "text", + "content": "LLaVA [49]). Additionally, in the absence of a pre-trained visual encoder, Single Transformers have to learn visual representations from scratch. Thus, an intriguing question arises: can a trained Single Transformer emerge as a strong vision encoder?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 152, + 295, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 152, + 295, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 152, + 295, + 319 + ], + "type": "text", + "content": "We conduct a series of experiments to train and study our Single trAnformer model for vIsion and Language (SAIL). While we do not propose novel architecture designs, we introduce necessary modifications to enable the model to process different modalities in a unified architecture. In its micro architecture design, we address the different spatial characteristics of 2D images and 1D text data by employing a mixed attention mechanism: bidirectional attention for image patches and causal attention for text tokens, combined with multimodal rotary position embedding. Through model and data scaling, SAIL achieves performance on vision-language benchmarks comparable to modular MLLMs, while also functioning as a high-performing vision backbone, as shown in Figure 1." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 320, + 295, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 320, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 320, + 295, + 441 + ], + "type": "text", + "content": "More concretely, our empirical analysis uncovers following striking advantages of Single Transformer architectures: (i) Superior Data Scaling: In controlled experiments, SAIL exhibits steeper performance gains as pretraining data scales. While LLaVA-style modular MLLMs initially perform well, our model's performance becomes very close to theirs when pretrained on 512M samples, as shown in Figure 1(A). This suggests that unified architectures can effectively leverage large-scale data and potentially match the performance of modular MLLMs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 443, + 295, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 443, + 295, + 538 + ], + "spans": [ + { + "bbox": [ + 55, + 443, + 295, + 538 + ], + "type": "text", + "content": "(ii) Vision Centric Information Flow Pattern: Through analysis of attention distributions, we observe that Single Transformers assign significantly higher attention scores to image tokens during token prediction compared to modular MLLMs. This indicates that the information flow in Single Transformer MLLMs is more direct, with visual tokens influencing prediction tokens more prominently, highlighting a vision-centric approach to decision-making." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 540, + 295, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 540, + 295, + 670 + ], + "spans": [ + { + "bbox": [ + 55, + 540, + 295, + 670 + ], + "type": "text", + "content": "(iii) Vision Encoder Functioning: Our experiments further demonstrate that the pretrained Single Transformer inherently serves as a powerful vision encoder. 
Comprehensive evaluations on vision-centric tasks, such as image classification and semantic segmentation, show that the model learns rich visual representations during multimodal pretraining. These representations enhance its capacity for both semantic-level comprehension (e.g., object categorization) and pixel-level understanding (e.g., fine-grained segmentation masks), bridging high-level abstraction and low-level visual reasoning within a unified architecture." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 672, + 295, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 672, + 295, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 295, + 732 + ], + "type": "text", + "content": "In summary, our findings indicate that Single Transformer-based MLLMs hold great promise in surpassing modular MLLMs in terms of leveraging large-scale data, forming direct vision-centric information pathways, and functioning as effective vision encoders. We hope" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 91, + 553, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 553, + 139 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 553, + 139 + ], + "type": "text", + "content": "our empirical findings inspire further research to refine and enhance Single Transformer architecture, ultimately driving advancements in multimodal intelligence from a new perspective." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 152, + 400, + 165 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 152, + 400, + 165 + ], + "spans": [ + { + "bbox": [ + 313, + 152, + 400, + 165 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 174, + 548, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 174, + 548, + 186 + ], + "spans": [ + { + "bbox": [ + 313, + 174, + 548, + 186 + ], + "type": "text", + "content": "2.1. Paradigms in Vision-Language Model Design" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 193, + 553, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 193, + 553, + 479 + ], + "spans": [ + { + "bbox": [ + 313, + 193, + 553, + 479 + ], + "type": "text", + "content": "Modular MLLMs with Visual Encoders. The prevailing approach in MLLM design employs modular architectures [3, 49, 61, 68, 77] that rely on pretrained vision encoders (e.g., CLIP-ViT [15, 62], InternViT [14]) to process visual inputs. The visual features extracted from these frozen encoders are then aligned with LLM input spaces via linear [7, 49, 75, 91] or cross-attention layers [1, 33]. While this module design enables effective transfer of pretrained visual-language knowledge, it also introduces several limitations. First, incorporating a separate ViT encoder significantly slows down both training and inference, increasing deployment complexity and requiring costly infrastructure—especially when compared to a single transformer unified model. Second, common strategies for integrating visual features, such as direct mapping into LLM inputs [7, 49, 91] or sharing them across LLM layers [1, 33], often struggle to reconcile the inherent differences between images and text representations. Finally, as model scale, balancing the interactions between the the encoder, LLM, and alignment layers becomes increasingly challenging [11, 54]. Thus, in this work, we explore a single transformer-based MLLM architecture that eliminates the ViT encoder and alignment components to overcome these challenges." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 480, + 553, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 480, + 553, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 480, + 553, + 732 + ], + "type": "text", + "content": "Single Transformer-based MLLMs Without Visual Encoders. Emerging research explores end-to-end architectures that process raw image patches and text tokens through a single Transformer, bypassing visual encoders entirely. These monolithic designs fall into two categories: continuous tokenization and discrete tokenization. Continuous tokenization, exemplified by Fuyu-8B [5] and SOLO [11], directly maps patches to LLM embeddings via linear projections, enabling flexible resolution handling but requiring massive pretraining data. Discrete tokenization, adopted by Chameleon [67] and Emu3 [79], employs VQ-VAE tokenizers to compress images into discrete tokens, trading pixel-level fidelity for generation capabilities. While later efforts such as EVE [21] and MonoInternVL [54] demonstrate the feasibility of encoder-free training, critical gaps remain: (1) Existing methods rely on extra designs and auxiliary loss [21], complicating training pipelines; (2) The scaling laws and fundamental properties of purely end-to-end trained models remain poorly understood; (3) Vision-language interaction in shared parameter spaces lacks systematic analysis—most prior MLLMs" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 91, + 294, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 294, + 174 + 
], + "spans": [ + { + "bbox": [ + 55, + 91, + 294, + 174 + ], + "type": "text", + "content": "default to causal attention for processing image-text sequences. In this work, we reveal that enabling bidirectional attention between image patches significantly enhances visual representation learning, addressing a key limitation in current designs. More importantly, our study bridges these gaps by establishing foundational principles for training scalable, self-contained single-transformer MLLMs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 183, + 228, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 183, + 228, + 196 + ], + "spans": [ + { + "bbox": [ + 55, + 183, + 228, + 196 + ], + "type": "text", + "content": "2.2. Vision Representation Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 201, + 294, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 201, + 294, + 273 + ], + "spans": [ + { + "bbox": [ + 55, + 201, + 294, + 273 + ], + "type": "text", + "content": "Learning effective vision representations is a core challenge in computer vision research, with extensive works [4, 19, 20, 26, 31, 80] dedicated to this problem. With the proliferation of large-scale web-sourced image-text datasets [6, 27, 63], recent methods leverage this data to train deep visual representations via three primary paradigms:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 274, + 295, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 274, + 295, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 274, + 295, + 369 + ], + "type": "text", + "content": "Text as Classification Labels. Early methods used textual descriptions as weak supervision by extracting categorical labels from captions. For example, frameworks like Tag2Text [34] and RAM [89] used ViTs [23] to predict noun-based pseudo-labels from datasets like CC12M [6]. 
CatLIP [57] scaled labels to millions using object-centric supervision, and SuperClass [35] directly used tokenized text tokens as classification categories." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 369, + 295, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 369, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 369, + 295, + 441 + ], + "type": "text", + "content": "Image-Text Contrastive Learning Contrastive pretraining, as exemplified by CLIP [15, 62] and ALIGN [37], aligns global image-text embeddings within a shared latent space. Subsequent works [8, 14, 42, 45, 78, 87] focused on enhancing CLIP's performance and improving training efficiency." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 441, + 295, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 441, + 295, + 596 + ], + "spans": [ + { + "bbox": [ + 55, + 441, + 295, + 596 + ], + "type": "text", + "content": "Text as Autoregressive Targets. Caption generation as a pretext task is another approach for visual representation learning. SimVLM [80] trains encoder-decoder architectures to autoregressively predict captions, while CapPa [71] trains vision encoders through sequence prediction. These methods often retain modular designs or auxiliary components like contrastive losses [83]. Our work aligns with this category but removes architectural fragmentation by jointly modeling image patches and text tokens in a single Transformer. We find that the pre-trained Single Transformer learns transferable vision representations, enabling it to handle downstream multimodal understanding tasks and function as a vision encoder without modifications." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 608, + 295, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 608, + 295, + 636 + ], + "spans": [ + { + "bbox": [ + 55, + 608, + 295, + 636 + ], + "type": "text", + "content": "3. 
SAIL: Training a Single Transformer for Vision and Language" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 642, + 170, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 170, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 170, + 653 + ], + "type": "text", + "content": "3.1. Model Architecture" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 660, + 295, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 660, + 295, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 660, + 295, + 732 + ], + "type": "text", + "content": "SAIL is built upon a unified Transformer architecture (Figure 2(A)) that processes multimodal inputs through streamlined, modality-specific preprocessing. For text, raw input is tokenized using the language model's tokenizer and then transformed into embeddings via the textual embedding module. For images, we partition the input into fixed-" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 91, + 553, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 553, + 234 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 553, + 234 + ], + "type": "text", + "content": "size patches and project them into continuous embeddings via a linear projection. Additionally, we maintain a list of special tokens explicitly designed for visual modality encoding: and tokens mark the beginning and end of an image patch span, respectively. In multimodal scenarios, such as image-text pairs, these embeddings are concatenated into a single sequence and fed into the Transformer, enabling joint cross-modal interactions through unified self-attention layers. This design eliminates the need for modality-specific encoders, which efficiently processess heterogeneous data within a single transformer framework." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 249, + 553, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 249, + 553, + 380 + ], + "spans": [ + { + "bbox": [ + 313, + 249, + 553, + 380 + ], + "type": "text", + "content": "Bidirectional attention within image patches. While existing multimodal large language models (MLLMs) [41, 48, 49, 91] predominantly adopt causal attention for autoregressive sequence modeling, our experiments reveal that enabling full bidirectional attention among tokens from the same image significantly enhances visual representation learning and boosts downstream vision-language task performance. Note that previous Single Transformer works [11, 21, 54, 79] have only utilized causal attention, without exploring the potential of mixed attention mechanisms." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 395, + 553, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 395, + 553, + 551 + ], + "spans": [ + { + "bbox": [ + 313, + 395, + 553, + 551 + ], + "type": "text", + "content": "As illustrated in Figure 2(B), for SAIL we implement a mixed attention scheme: (1) For text tokens, we preserve causal attention to maintain autoregressive generation capabilities, allowing each token to attend only to its predecessors. (2) For image tokens, we activate full bidirectional attention within each image patch group, empowering every visual token to interact with all others in the same image. This design captures holistic spatial relationships and contextual dependencies among visual elements, addressing the under-explored potential of attention mechanisms in cross-modal alignment. The improved interaction paradigm not only refines vision-language feature fusion but also provides stronger inductive biases for complex reasoning tasks." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 564, + 553, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 564, + 553, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 564, + 553, + 732 + ], + "type": "text", + "content": "Multimodal Rotary Position Embeddings. Following [77], we implement Multimodal RoPE (M-RoPE) in SAIL to harmonize positional modeling for multimodal inputs. The method decomposes positional encoding into two axes: height, and width. For text tokens, all axes share uniform position IDs (aligned with 1D-RoPE), whereas for images, height/width IDs adaptively map to token coordinates, as is shown in Fig 2(C). Notably, position indexing is sequentially initialized across modalities (e.g., starting from images before extending to subsequent text), preserving inter-modal consistency. M-RoPE not only improves positional sensitivity but also constrains absolute position magnitudes for visual tokens, facilitating robust generalization to extended sequences in inference." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 93, + 309, + 308 + ], + "blocks": [ + { + "bbox": [ + 77, + 93, + 309, + 308 + ], + "lines": [ + { + "bbox": [ + 77, + 93, + 309, + 308 + ], + "spans": [ + { + "bbox": [ + 77, + 93, + 309, + 308 + ], + "type": "image", + "image_path": "f93ce8b5d07f651bb28ff57f3b09ece10f95d308a8933e9753d6ce14a5c1df50.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 319, + 555, + 376 + ], + "lines": [ + { + "bbox": [ + 55, + 319, + 555, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 319, + 555, + 376 + ], + "type": "text", + "content": "Figure 2. Model architecture and micro-designs for SAIL. (A) Model Architecture: SAIL is a unified transformer that processes both images and texts without extra module designs. (B) Mixed Attention Mechanism: we adopt bidirectional attention for image patches from the same image and causal attention for text tokens. Examples for a multimodal sequence and a text sequence are provided. Colored squares represent \"allow to attend\" and white squares indicate \"prevent from attending\". (C) Multimodal RoPE: an illustration of the multimodal rotary position embedding for SAIL, with examples for a multimodal sequence and a text sequence." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 328, + 89, + 531, + 308 + ], + "blocks": [ + { + "bbox": [ + 328, + 89, + 531, + 308 + ], + "lines": [ + { + "bbox": [ + 328, + 89, + 531, + 308 + ], + "spans": [ + { + "bbox": [ + 328, + 89, + 531, + 308 + ], + "type": "image", + "image_path": "02d4e6e0e828472c97f4452200369a3fa7e8e4ab06ccd32a8e320982bf2f5130.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 385, + 133, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 385, + 133, + 399 + ], + "spans": [ + { + "bbox": [ + 55, + 385, + 133, + 399 + ], + "type": "text", + "content": "3.2. Pretraining" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 404, + 295, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 404, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 404, + 295, + 441 + ], + "type": "text", + "content": "We apply a two-stage curriculum to progressively strengthen the visual perception of SAIL while preserving its inherent language capabilities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 441, + 295, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 441, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 441, + 295, + 574 + ], + "type": "text", + "content": "Stage 1: Accelerated Visual Knowledge Acquisition. In this stage, we pretrain SAIL on large-scale image-text pairs to rapidly bootstrap its visual understanding. 
To maximize data throughput, we uniformly resize all images to a lower resolution (e.g., " + }, + { + "bbox": [ + 55, + 441, + 295, + 574 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 55, + 441, + 295, + 574 + ], + "type": "text", + "content": "), reducing multimodal sequence lengths and enabling the model to process more samples within fixed training time. To prevent catastrophic forgetting of linguistic knowledge, we interleave pure text corpora with multimodal data during training. This hybrid approach ensures efficient exposure to visual patterns while maintaining robust language proficiency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 575, + 295, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 575, + 295, + 707 + ], + "spans": [ + { + "bbox": [ + 55, + 575, + 295, + 707 + ], + "type": "text", + "content": "Stage 2: Enhancing Any-Resolution Image Understanding. Real-world applications require robustness to images of varying resolutions and aspect ratios, such as documents, charts, or infographics. Following prior works [11, 21], we extend pretraining with an any-resolution strategy: images retain their native resolutions during processing, and positional embeddings adapt dynamically to arbitrary spatial dimensions. This stage further refines SAIL's ability to model fine-grained visual details (e.g., tabular structures, text-rich graphics) while continuing to incorporate text-only data for language capability preservation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 708, + 295, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 708, + 295, + 731 + ], + "spans": [ + { + "bbox": [ + 55, + 708, + 295, + 731 + ], + "type": "text", + "content": "Pretraining Objective. 
Throughout both stages, we optimize the standard language modeling loss only on text to-" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 386, + 555, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 386, + 555, + 410 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 555, + 410 + ], + "type": "text", + "content": "kens. Image patches and special visual tokens are excluded from loss computation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 445, + 446, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 445, + 446, + 458 + ], + "spans": [ + { + "bbox": [ + 314, + 445, + 446, + 458 + ], + "type": "text", + "content": "3.3. Supervised Fine-tuning" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 472, + 555, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 472, + 555, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 472, + 555, + 567 + ], + "type": "text", + "content": "During the Supervised Fine-tuning (SFT) stage, we train SAIL on publicly available, multi-source instruction datasets to enhance its understanding of complex linguistic instructions and diverse dialogue patterns critical for real-world deployment. This phase fine-tunes the entire network architecture, focusing on aligning the model's responses with human intent through exposure to varied instructional formats and multimodal interactions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 576, + 556, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 576, + 556, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 576, + 556, + 732 + ], + "type": "text", + "content": "Table 1 shows the details of training datasets for pretraining and supervised fine-tuning (SFT) across all stages. 
During Stage 1 pretraining, we utilized mixed multimodal and pure text datasets including Recap-DataComp-1B [43] and SlimPajama [66], with images at a resolution of " + }, + { + "bbox": [ + 313, + 576, + 556, + 732 + ], + "type": "inline_equation", + "content": "224\\mathrm{x}224" + }, + { + "bbox": [ + 313, + 576, + 556, + 732 + ], + "type": "text", + "content": " totaling 512M image-text pairs. In Stage 2, the pretraining datasets include Capfusion [84], self-curated OCR data from LAION COCO [63], InfinityMM Stage 2 subset [29], and SlimPajama, utilizing the any resolution (AnyRes) strategy, with a combined total of 86M image-text pairs along with text data. The SFT stage employed the InfinityMM Stage 3 subset, processed at any resolution, containing 6M image-text pairs." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 88, + 293, + 185 + ], + "blocks": [ + { + "bbox": [ + 58, + 88, + 293, + 185 + ], + "lines": [ + { + "bbox": [ + 58, + 88, + 293, + 185 + ], + "spans": [ + { + "bbox": [ + 58, + 88, + 293, + 185 + ], + "type": "table", + "html": "
StageDatasetImg.ResNum
Pretraining S1Recap-DataComp-1B [43]224x224512M
SlimPajama [66]-
Pretraining S2Capfusion [84]AnyRes60M
OCR from LAION COCO [63]7M
InifinityMM Stage 2 subset [29]19M
SlimPajama [66]-
SFTInifinityMM Stage3 [29]AnyRes6M
", + "image_path": "b962b4ccb763cd14712f5e425147173100dfb6ec6faff3b2f4df0162cf8345b5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 189, + 295, + 256 + ], + "lines": [ + { + "bbox": [ + 55, + 189, + 295, + 256 + ], + "spans": [ + { + "bbox": [ + 55, + 189, + 295, + 256 + ], + "type": "text", + "content": "Table 1. Details of training datasets used across all stages. \"Img.Res\" refers to the image resolution settings applied during each training stage. All datasets listed are publicly available. Note that these settings represent the default configuration for standard SAIL training, while separate settings are used for scaling experiments and ablation studies." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 262, + 132, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 262, + 132, + 276 + ], + "spans": [ + { + "bbox": [ + 55, + 262, + 132, + 276 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 282, + 182, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 282, + 182, + 296 + ], + "spans": [ + { + "bbox": [ + 55, + 282, + 182, + 296 + ], + "type": "text", + "content": "4.1. Experimental Settings" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 300, + 295, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 300, + 295, + 408 + ], + "spans": [ + { + "bbox": [ + 55, + 300, + 295, + 408 + ], + "type": "text", + "content": "Evaluation Benchmarks. For evaluation of vision and language tasks, we evaluate SAIL and existing MLLMs on a broad range of multimodal benchmarks. Specifically, MLLM benchmarks encompass MMBench-EN [50], SEEDBench-IMG [40], MMVet [85], MME [46], HallusionBench [30], MathVistaMINI [53], and OCR-Bench [51]. 
Visual question answering benchmarks include TextVQA [65], ScienceQA-IMG [52], AI2D [39], MMStar [9], RealWorldQA [81]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 408, + 295, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 408, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 55, + 408, + 295, + 456 + ], + "type": "text", + "content": "For evaluation of vision representation learning, we conduct experiments on ImageNet-1K [20] for image classification, ADE20K for semantic segmentation [90], and ARO [86] for attribute, relation, and ordering." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 456, + 295, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 456, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 456, + 295, + 624 + ], + "type": "text", + "content": "Implementation Details. For pretraining, we initialize SAIL from the Mistral-7B-v0.1 base LLM and set the patch size to 14. We modify Megatron [64] to support SAIL's multimodal input. Pretraining uses 128 NVIDIA A100 80G GPUs with 2-way tensor parallelism and 64-way data parallelism. The learning rate is set at 5e-5 and decays cosinely to a minimum of 5e-6. For training efficiency, we concatenate sequences from different data samples into one long sequence of 32,768 tokens, adjusting the attention mask to ensure that tokens from different samples do not attend to each other. We use a round-robin approach to interleave image-text packed sequences and pure text packed sequences, configuring the global batch to contain approximately 16K image-text pairs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 624, + 295, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 624, + 295, + 672 + ], + "spans": [ + { + "bbox": [ + 55, + 624, + 295, + 672 + ], + "type": "text", + "content": "For Supervised Fine-Tuning (SFT), the global batch size is set to 512. 
Training is performed for one epoch with a maximum learning rate of 1e-5, following a linear warm-up phase and then transitioning to a cosine decay schedule." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 672, + 295, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 672, + 295, + 733 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 295, + 733 + ], + "type": "text", + "content": "For vision, we load the checkpoint after Stage 1 pretraining and keep it frozen for downstream evaluations, including (1) image classification on ImageNet-1K [20], (2) semantic segmentation on ADE20K [90], and (3) attribute, relation and, ordering on the ARO benchmark [86]. Specif" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "content": "ically, (1) for image classification, we utilize an attention-based classifier [25] with 90 epochs of linear probing, where detailed configurations are mostly obtained from common practices [32, 73, 74]. Images are resized to " + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "content": " and the global batch size is 8,192 across 8 A100 (80G) GPUs. (2) For semantic segmentation, we adopt ViT-Adapter [12] with UperNet [82] as the segmentation decoder. The implementation is based on MMSegmentation [17] with 80k training iterations. The input resolution is " + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "content": " and the global batch size is 16 across 8 A100 (80G) GPUs. 
(3) For attribute, relation and, ordering, we regard the negative of the caption loss over each image-text pair as the similarity metric for retrieval." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 255, + 437, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 255, + 437, + 268 + ], + "spans": [ + { + "bbox": [ + 313, + 255, + 437, + 268 + ], + "type": "text", + "content": "4.2. Experimental Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 274, + 487, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 274, + 487, + 285 + ], + "spans": [ + { + "bbox": [ + 313, + 274, + 487, + 285 + ], + "type": "text", + "content": "4.2.1. Results on Vision Language Tasks" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 289, + 555, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 289, + 555, + 553 + ], + "spans": [ + { + "bbox": [ + 313, + 289, + 555, + 553 + ], + "type": "text", + "content": "As shown in Table 2, we compare SAIL against existing MLLMs across 13 vision-language benchmarks. SAIL consistently outperforms other Single Transformer-based models like Fuyu [5], EVE [21], SOLO [11], MonoInternVL [54], and EVE2 [22] across diverse vision-language tasks. This demonstrates that SAIL can achieve significant performance gains and push the boundaries of Single Transformer-based MLLMs without needing extra component designs or auxiliary training losses. Moreover, when compared to methods employing discrete vision tokens (e.g., Chameleon and Emu3), SAIL demonstrates superior performance. These results validate that scaling up single-transformer pretraining effectively enhances cross-modal alignment between images and text. Compared to the state-of-the-art modular MLLM LLaVA-OneVision [41], SAIL achieves comparable performance on some benchmarks, such as MMStar, SEEDBench-IMG, and RealWorldQA. 
While the performance of Single Transformer-based MLLMs still lags behind modular MLLMs in certain areas, we hypothesize that scaling the pretraining data volume or incorporating higher-quality instruction-tuning data will bridge the remaining performance gap." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 560, + 525, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 560, + 525, + 572 + ], + "spans": [ + { + "bbox": [ + 313, + 560, + 525, + 572 + ], + "type": "text", + "content": "4.2.2. Results on Vision Representation Learning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 575, + 555, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 575, + 555, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 575, + 555, + 624 + ], + "type": "text", + "content": "In this section, we compare the quality of learned visual representations of our SAIL with other Single Transformer-based alternatives, including EVE [21], EVE2 [22], and SOLO [11]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "text", + "content": "Classification and Segmentation. As demonstrated in Table 3, our method, SAIL, achieves a Top-1 accuracy of " + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "inline_equation", + "content": "84.95\\%" + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "text", + "content": " and a Top-5 accuracy of " + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "inline_equation", + "content": "97.59\\%" + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "text", + "content": " on the validation set of ImageNet-1K [20], significantly outperforming state-of-the-art alternatives [11, 21, 22]. 
In the segmentation task, SAIL also demonstrates superior performance with an mIoU of " + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "inline_equation", + "content": "55.30\\%" + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "text", + "content": ", an mAcc of " + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "inline_equation", + "content": "67.24\\%" + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "text", + "content": ", and an aAcc of " + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "inline_equation", + "content": "84.87\\%" + }, + { + "bbox": [ + 313, + 624, + 556, + 732 + ], + "type": "text", + "content": ", illustrated in Table 3. These results indicate that SAIL is effective in both classification and segmentation tasks, of" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 88, + 553, + 272 + ], + "blocks": [ + { + "bbox": [ + 58, + 88, + 553, + 272 + ], + "lines": [ + { + "bbox": [ + 58, + 88, + 553, + 272 + ], + "spans": [ + { + "bbox": [ + 58, + 88, + 553, + 272 + ], + "type": "table", + "html": "
Method#Param#Data#VtokenGeneral VQAHallucinationMath&knowledgeOCR VQA
MMS*MMBenSEEDIMMVMMERWQAPOPEHalluSQA1MathVTQAAI2DOCRB
Modular MLLMs:
InternVL-1.5 [13]2.2B-/-332846.770.969.839.3190257.988.337.384.941.370.569.8654
QwenVL-Chat [3]7B7.2B / 50M25634.560.658.2-184849.3-36.868.235.361.545.9488
LLVA-1.5 [47]7B0.4B+ / 665K57633.164.364.330.5185954.885.927.666.825.546.154.8318
LLVA-1.6 [48]7B0.4B+ / 760K288037.667.464.743.9184257.886.427.670.232.564.966.6532
Cambrian-1 [69]8B10B+ / 7M57650.775.974.7--64.2-30.680.448.171.773.0-
LLVA-OneVision [41]7B10B+ / 3.2M729060.981.774.858.8199865.5--96.656.1-81.6-
Single Transformer-based MLLMs:
Fuyu [5]8B-/--34.410.759.321.4-43.78429.856.830.2-46.8366
Chameleon [67]7B1.4B+ / 1.8M102431.131.130.68.31703919.417.147.222.54.846.07.0
EVE [21]7B33M / 1.8M2304-52.364.625.71628-85.0-64.9-56.861.0398
SOLO [11]8B43.7M / 2M102435.867.764.430.4126044.778.640.473.332.925.061.4126
Mono-InternVL [54]3B1.3B / 7M6400-65.567.440.11875--45.793.645.772.668.6767
Emu3 [79]8B-/-16K46.658.568.237.2-57.485.231.789.231.364.770.0687
EVE2 [22]7B92M / 7.3M2500-66.371.445.0170962.487.6-96.2-71.174.8702
SAIL7B600M / 6M360053.170.172.946.3171963.985.854.293.357.077.176.7783
", + "image_path": "0cb729d503de7c017da687d7d77c3458500e81ea0c18677e9175ecc68a34137c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 56, + 360, + 294, + 443 + ], + "blocks": [ + { + "bbox": [ + 55, + 275, + 555, + 353 + ], + "lines": [ + { + "bbox": [ + 55, + 275, + 555, + 353 + ], + "spans": [ + { + "bbox": [ + 55, + 275, + 555, + 353 + ], + "type": "text", + "content": "Table 2. Comparison with existing vision-language models on various vision-language benchmarks, including MMS*: MMStar [9]; MMBen:MMBench-EN [50]; SEED:SEEDBench-Img [40]; MMV:MMVet [85]; MME [46]; POPE [44]; Hallu: HallusionBench [30]; SQA: ScienceQA-Img [52]; TVQA: TextVQA [65]; MathV: MathVistaMINI [53]; AI2D [39]; RWQA: RealWorldQA [81]; OCRB:OCR-Bench [51]. Note that #A-Param denotes the number of activated parameters; #Data represents the pre-training / fine-tuning data volume; #Vtoken indicates the maximum image patch tokens. For MME, we report the sum of perception and cognition scores. The top two results are highlighted in bold and underline, respectively. All results are derived from those reported in other papers and the official reproduction results from the OpenCompass leaderboard [24]. Our results are obtained by VLMEvalKit [24]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 360, + 294, + 443 + ], + "lines": [ + { + "bbox": [ + 56, + 360, + 294, + 443 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 294, + 443 + ], + "type": "table", + "html": "
MethodClassificationSegmentation
Top-1Top-5mIoUmAccaAcc
EVE [21]42.0365.7727.1235.8972.91
EVE2 [22]44.8669.4140.8553.5379.31
SOLO [11]59.1080.8935.1144.8176.02
SAIL84.9597.5955.3067.2484.87
", + "image_path": "b1c4ad969f48db6a7414b4fd603874024b5c5b9bced4b811a8e22034583fe0c3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 58, + 495, + 290, + 574 + ], + "blocks": [ + { + "bbox": [ + 55, + 448, + 295, + 482 + ], + "lines": [ + { + "bbox": [ + 55, + 448, + 295, + 482 + ], + "spans": [ + { + "bbox": [ + 55, + 448, + 295, + 482 + ], + "type": "text", + "content": "Table 3. Comparison on image classification and semantic segmentation with other encoder-free approaches. Our SAIL outperforms other alternatives by a large margin." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 495, + 290, + 574 + ], + "lines": [ + { + "bbox": [ + 58, + 495, + 290, + 574 + ], + "spans": [ + { + "bbox": [ + 58, + 495, + 290, + 574 + ], + "type": "table", + "html": "
Method#Data#ParamImageNet-1KADE20K
OpenCLIP-H [15]2B0.6B84.4-
OpenCLIP-G [15]2B1.8B86.239.3†
ViT-22B [19]3B22B89.555.3
InternViT [14]6B6B88.258.7
SAIL0.5B7B85.055.3
", + "image_path": "35b227d6c52d257cd44604f0355dc61261e44c4898ffbe3227dfe22da29d8176.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 647, + 295, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 647, + 295, + 707 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 295, + 707 + ], + "type": "text", + "content": "fering substantial improvements over existing methods. In Table 4, even when comparing with other state-of-the-art vision backbones, our SAIL manages to achieve remarkable competitive performance with significantly less training data, demonstrating the scaling property of SAIL." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 708, + 296, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 708, + 296, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 708, + 296, + 732 + ], + "type": "text", + "content": "Attribute, Relation, and Ordering. To systematically evaluate the ability of SAIL to understand different types of" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 316, + 360, + 553, + 487 + ], + "blocks": [ + { + "bbox": [ + 55, + 578, + 295, + 633 + ], + "lines": [ + { + "bbox": [ + 55, + 578, + 295, + 633 + ], + "spans": [ + { + "bbox": [ + 55, + 578, + 295, + 633 + ], + "type": "text", + "content": "Table 4. Comparison on image classification and semantic segmentation with other vision backbones. " + }, + { + "bbox": [ + 55, + 578, + 295, + 633 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 55, + 578, + 295, + 633 + ], + "type": "text", + "content": " indicates training with head tuning using UperNet [82], while others are based on ViT-Adapter [12]. SAIL, with significantly less training data, achieves competitive performance." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 360, + 553, + 487 + ], + "lines": [ + { + "bbox": [ + 316, + 360, + 553, + 487 + ], + "spans": [ + { + "bbox": [ + 316, + 360, + 553, + 487 + ], + "type": "table", + "html": "
MethodRelationAttributeOrder
COCOFlickr30K
OpenCLIP-H [15]49.964.632.640.4
OpenCLIP-G [15]49.965.633.038.3
CLIP-B/32 [62]59.262.948.157.9
CLIP-L/14 [62]61.261.746.856.8
InternViT [14]59.666.073.476.3
NegCLIP [86]81.071.086.091.0
CapPa [72]86.785.798.899.2
SAIL100.099.5100.0100.0
", + "image_path": "78c033c10d95a55f2ce039c403daf56a8cb6a605be2c6ec2a69fb139cd0433f2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 492, + 555, + 526 + ], + "lines": [ + { + "bbox": [ + 313, + 492, + 555, + 526 + ], + "spans": [ + { + "bbox": [ + 313, + 492, + 555, + 526 + ], + "type": "text", + "content": "Table 5. Comparison on attribute, relation, and ordering (ARO) with other vision backbones. SAIL almost encodes compositional relationships between objects and attributes perfectly." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 536, + 553, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 536, + 553, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 536, + 553, + 597 + ], + "type": "text", + "content": "relationships, attributes, and order information, we conduct experiments on the ARO benchmark [86]. As demonstrated in Table 5, SAIL encodes compositional relationships between objects and attributes almost perfectly, significantly surpassing other state-of-the-art vision backbones." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 597, + 553, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 597, + 553, + 633 + ], + "spans": [ + { + "bbox": [ + 313, + 597, + 553, + 633 + ], + "type": "text", + "content": "For additional vision-related tasks, please refer to PixelSAIL [88] for SAIL's downstream capabilities in pixel-grounded understanding." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 639, + 491, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 639, + 491, + 652 + ], + "spans": [ + { + "bbox": [ + 313, + 639, + 491, + 652 + ], + "type": "text", + "content": "4.3. 
Properties of Single Transformer" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 657, + 423, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 657, + 423, + 669 + ], + "spans": [ + { + "bbox": [ + 313, + 657, + 423, + 669 + ], + "type": "text", + "content": "4.3.1. Scaling Properties." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 672, + 553, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 672, + 553, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 672, + 553, + 732 + ], + "type": "text", + "content": "Model Scaling: We selected models of different sizes: SAIL-0.5B, SAIL-3B, and SAIL-7B (SAIL by default) for our experiments. Each model underwent Stage 1 pretraining on a mixed multimodal and pure text dataset, encountering 512M image-text pairs. Subsequently, they were fine" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 88, + 176, + 187 + ], + "blocks": [ + { + "bbox": [ + 57, + 88, + 176, + 187 + ], + "lines": [ + { + "bbox": [ + 57, + 88, + 176, + 187 + ], + "spans": [ + { + "bbox": [ + 57, + 88, + 176, + 187 + ], + "type": "image", + "image_path": "e8f37e8f122c2c39ea9d4d546ac139f8ce9719c6c97ce52462f5ab50d470db13.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 196, + 295, + 242 + ], + "lines": [ + { + "bbox": [ + 55, + 196, + 295, + 242 + ], + "spans": [ + { + "bbox": [ + 55, + 196, + 295, + 242 + ], + "type": "text", + "content": "Figure 3. Model scaling of SAIL. 
Left: As the model size increases, the training language modeling loss gradually decreases. Right: As the model size increases, performance on downstream VLM tasks progressively improves." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 176, + 88, + 294, + 187 + ], + "blocks": [ + { + "bbox": [ + 176, + 88, + 294, + 187 + ], + "lines": [ + { + "bbox": [ + 176, + 88, + 294, + 187 + ], + "spans": [ + { + "bbox": [ + 176, + 88, + 294, + 187 + ], + "type": "image", + "image_path": "08e984e5b7eac62672319f52aa00b0a3fb3f0b7e9bc754dbd7bad2e6f2348257.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 260, + 295, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 260, + 295, + 308 + ], + "spans": [ + { + "bbox": [ + 55, + 260, + 295, + 308 + ], + "type": "text", + "content": "tuned on the LLaVA-mix-665K dataset using the any resolution (anyres) strategy. We evaluated the models based on their performance on vision and language benchmarks after supervised fine-tuning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 310, + 295, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 310, + 295, + 453 + ], + "spans": [ + { + "bbox": [ + 55, + 310, + 295, + 453 + ], + "type": "text", + "content": "The normalized performance of SAIL against model size is plotted in Figure 3. As the model size scales up, we observe a corresponding enhancement in performance. Additionally, as shown on the left side of Figure 3, the training language modeling loss decreases with increasing model size. This reduction in training loss indicates that larger models have a greater capacity to learn multimodal alignments effectively, enabling them to capture complex relationships between vision and language more accurately. 
The improved learning capacity directly translates to better performance on downstream VLM tasks, showcasing the benefits of scaling up the Single Transformer architecture." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 455, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 455, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 455, + 295, + 694 + ], + "type": "text", + "content": "Data Scaling: we compared SAIL with its modular MLLM counterpart. For the modular MLLM, we used SigLIPSO [87] as the vision encoder, and the language model shared the same architecture and initialization parameters as SAIL. Both models were pre-trained using Pretraining stage-1 setting, with SAIL encountering 32M, 128M, and 512M image-text pairs during training, followed by fine-tuning on the LLaVA-mix-665K dataset. All parameters of both models are trainable. Both models employ an identical number of input tokens for images and text. The normalized performance of both models is plotted in Figure 1(A). The results show that in the low-data regime (32M), SAIL's performance lags behind the modular MLLM, likely due to SigLIP's prior training on 40B samples. However, as the data scales, SAIL exhibits a steeper performance curve, indicating more promising data scaling properties. At 512M image-text pairs, SAIL achieves performance comparable to the modular MLLM in our evaluation subset. This demonstrates the single transformer's superior data scalability, even without a pretrained vision encoder." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "spans": [ + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "type": "text", + "content": "Quantitative results on evaluated benchmark tasks of model scaling and data scaling are tabulated in the appendix." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 316, + 89, + 552, + 247 + ], + "blocks": [ + { + "bbox": [ + 316, + 89, + 552, + 247 + ], + "lines": [ + { + "bbox": [ + 316, + 89, + 552, + 247 + ], + "spans": [ + { + "bbox": [ + 316, + 89, + 552, + 247 + ], + "type": "image", + "image_path": "23f836472eb6724001df347ec8381d670f86dae4957d99f0de1396cfaa1bc7f7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 255, + 555, + 322 + ], + "lines": [ + { + "bbox": [ + 313, + 255, + 555, + 322 + ], + "spans": [ + { + "bbox": [ + 313, + 255, + 555, + 322 + ], + "type": "text", + "content": "Figure 4. Image Attention Score Allocation: The figure shows the proportion of image attention scores across different transformer layers for Single Transformer-based MLLM and modular MLLM when predicting tokens. Single Transformer-based MLLM generally allocates higher attention weights to image tokens compared to modular MLLM." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 342, + 452, + 353 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 452, + 353 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 452, + 353 + ], + "type": "text", + "content": "4.3.2. Information Flow Pattern" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 358, + 555, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 358, + 555, + 537 + ], + "spans": [ + { + "bbox": [ + 313, + 358, + 555, + 537 + ], + "type": "text", + "content": "Different attention pattern compared to modular MLLM: since our comparative experiments show that the Single Transformer model exhibits more promising data scaling properties, we conducted an analysis of the trained SAIL model and its modular MLLM counterpart. 
Specifically, we followed the methodology from FastV [10] to analyze the attention score distribution for each predicted token given an image and a user query. This analysis focuses on how much attention is allocated to image tokens during token prediction. We selected 1000 samples from various datasets including VQAv2, GQA, TextVQA, DocVQA, MME, SEEDBench-IMG, MMBench, and some self-curated dialog examples. For each model prediction, we computed the average attention scores assigned to previous image tokens by the output token." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 539, + 554, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 539, + 554, + 611 + ], + "spans": [ + { + "bbox": [ + 313, + 539, + 554, + 611 + ], + "type": "text", + "content": "We conducted a comparative experiment between Single Transformer-based MLLMs and modular MLLMs. The Single Transformer-based MLLMs included SAIL, SOLO [11], and EVE [21], while the modular MLLMs included Qwen2-VL [77], LLaVA-OneVision [41], and LLaVA1.5 [47]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "text", + "content": "The results are depicted in Figure 4. Single Transformer-based MLLMs allocate between " + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "text", + "content": " of attention scores to image tokens across all layers when predicting tokens. 
In contrast, modular MLLMs such as Qwen2-VL and LLaVA-OneVision allocate only " + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 313, + 613, + 554, + 732 + ], + "type": "text", + "content": " of attention scores to image tokens across different layers. For LLaVA1.5, which does not update the ViT parameters during supervised fine-tuning (SFT), the image attention score is relatively high in the first two transformer layers but declines sharply in subsequent layers." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 88, + 294, + 159 + ], + "blocks": [ + { + "bbox": [ + 58, + 88, + 294, + 159 + ], + "lines": [ + { + "bbox": [ + 58, + 88, + 294, + 159 + ], + "spans": [ + { + "bbox": [ + 58, + 88, + 294, + 159 + ], + "type": "table", + "html": "
MethodMMBench [50]MME [46]
Physical RelationCelebrity RelationPositionPostersCelebrity
Modular MLLM30.450.598.3134.0100.3
SAIL52.288.9160.0108.275.0
", + "image_path": "94d095fde53983aad3eb8a4956fd06eba46ff9fbee4031253bb8f8ccb00cc867.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 240, + 295, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 240, + 295, + 314 + ], + "spans": [ + { + "bbox": [ + 55, + 240, + 295, + 314 + ], + "type": "text", + "content": "From this experiment, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 316, + 296, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 316, + 296, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 316, + 296, + 376 + ], + "type": "text", + "content": "These findings indicate that the Single Transformer model places more emphasis on grounding its predictions in the visual information. As the model undergoes data scaling, it allocates more effective computation to image tokens, thereby enhancing its capability as a vision-centric model." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "spans": [ + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "text", + "content": "In summary, the attention pattern analysis underscores the Single Transformer's ability to robustly integrate visual context, enabling it to scale efficiently and potentially outperform modular MLLMs in vision-language tasks." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 443, + 236, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 443, + 236, + 455 + ], + "spans": [ + { + "bbox": [ + 55, + 443, + 236, + 455 + ], + "type": "text", + "content": "4.3.3. Task-Specific Performance Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 462, + 295, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 462, + 295, + 486 + ], + "spans": [ + { + "bbox": [ + 55, + 462, + 295, + 486 + ], + "type": "text", + "content": "We dissect SAIL's strengths and limitations through targeted case studies:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 489, + 295, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 489, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 489, + 295, + 574 + ], + "type": "text", + "content": "Strengths: Spatial Reasoning. SAIL excels at tasks requiring precise spatial location. As shown in Table 6, under the setting of our data scaling experiment, it outperforms the modular counterpart by 61.7 points on the MME Position split and " + }, + { + "bbox": [ + 55, + 489, + 295, + 574 + ], + "type": "inline_equation", + "content": "21.8\\%" + }, + { + "bbox": [ + 55, + 489, + 295, + 574 + ], + "type": "text", + "content": " on MMBench Physical Relation questions. The unified architecture likely enables tighter coupling between visual geometry and linguistic descriptions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 576, + 295, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 576, + 295, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 576, + 295, + 732 + ], + "type": "text", + "content": "Weaknesses: World Knowledge. Conversely, SAIL falls short in tasks that demand extensive world knowledge. As shown in Table 6 SAIL underperforms in the MME celebrity and art splits compared to the modular MLLM. 
This underperformance can be attributed to SAIL's lack of diverse domain-specific data during pretraining, a gap that was not sufficiently addressed during supervised fine-tuning. Modular MLLMs, with their pretrained vision encoders like CLIP [15, 62] or SigLIP [87], have a broader knowledge base and therefore handle such tasks more effectively. We hypothesize that scaling up SAIL's pretraining data diversity could help bridge this gap, enhancing its performance on knowledge-intensive tasks." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 319, + 89, + 553, + 141 + ], + "blocks": [ + { + "bbox": [ + 55, + 162, + 295, + 218 + ], + "lines": [ + { + "bbox": [ + 55, + 162, + 295, + 218 + ], + "spans": [ + { + "bbox": [ + 55, + 162, + 295, + 218 + ], + "type": "text", + "content": "Table 6. Performance Comparison of SAIL and Modular MLLM in MMBench and MME Tasks: the strengths of SAIL in spatial reasoning tasks (MMBench Physical Relation and MME Position split) and its weaknesses in world knowledge tasks (MMBench Celebrity Relation and MME Celebrity and Posters splits)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 319, + 89, + 553, + 141 + ], + "lines": [ + { + "bbox": [ + 319, + 89, + 553, + 141 + ], + "spans": [ + { + "bbox": [ + 319, + 89, + 553, + 141 + ], + "type": "table", + "html": "
Exp. SettingVQAv2GQASQATQASEED-I
Default59.146.959.620.135.1
#1 No Img full attn57.845.258.716.233.8
#2 No pure text in PT56.342.148.618.332.4
", + "image_path": "3bb96e8beba6754511e44dc43585c34466a7f1e614f055714a6d3c6d31caaa15.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 144, + 555, + 224 + ], + "lines": [ + { + "bbox": [ + 313, + 144, + 555, + 224 + ], + "spans": [ + { + "bbox": [ + 313, + 144, + 555, + 224 + ], + "type": "text", + "content": "Table 7. Ablation Study on Basic Factors for SAIL: This table presents the impact of different ablation settings on the performance of SAIL across VQAv2 [28], GQA [36], SQA [52], TQA [65], and SEED-I [40]. The default setting includes image full attention and the inclusion of pure text data in pretraining. Ablation #1 removes image full attention, and ablation #2 excludes pure text in pretraining." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 241, + 529, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 241, + 529, + 255 + ], + "spans": [ + { + "bbox": [ + 313, + 241, + 529, + 255 + ], + "type": "text", + "content": "4.4. Empirical Observations on Basic Factors" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 260, + 555, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 260, + 555, + 332 + ], + "spans": [ + { + "bbox": [ + 313, + 260, + 555, + 332 + ], + "type": "text", + "content": "To guide scalable training of single-transformer MLLMs, we conduct ablation studies on two critical design choices using SAIL-0.5B pretrained on 128M image-text pairs and fine-tuned on LLaVA-mix-665K. Performance is evaluated through zero-shot image classification after pretraining [71] and vision-language benchmarks after SFT." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 334, + 556, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 334, + 556, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 334, + 556, + 514 + ], + "type": "text", + "content": "Bidirectional Attention for Image Patches with Multimodal Position Encoding. We compare two approaches for integrating image patches into the transformer: (1) Causal attention with 1D positional encoding, using a token to demarcate image rows. (2) Full bidirectional attention for image patches paired with multimodal rotary position embeddings (RoPE), which jointly encode spatial coordinates (e.g., 2D grid positions) and text token positions. As shown in Table 7, configuration of using bidirectional attention with multimodal RoPE significantly improves performance on vision-language tasks, with a particularly notable gain of " + }, + { + "bbox": [ + 313, + 334, + 556, + 514 + ], + "type": "inline_equation", + "content": "3.1\\%" + }, + { + "bbox": [ + 313, + 334, + 556, + 514 + ], + "type": "text", + "content": " on TextVQA. This suggests that enabling cross-patch interactions during pretraining enhances visual representation learning and tightens cross-modal alignment." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 514, + 556, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 556, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 556, + 658 + ], + "type": "text", + "content": "Interleaving Pure Text Data During Pretraining. We analyze the impact of mixing SlimPajama text data with image-text pairs during pretraining. The results, as presented in Table 7 #2, reveal that mixing in pure text data consistently improves performance across vision and language benchmarks. 
This finding underscores the importance of preserving language capabilities in the LLM when training Single Transformer models, as maintaining strong language skills is crucial for building a multimodal model capable of complex reasoning. Currently, incorporating text data in training is one of the effective methods to maintain the language abilities of the model." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 659, + 556, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 659, + 556, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 659, + 556, + 732 + ], + "type": "text", + "content": "In conclusion, our ablation studies identify key design choices for training SAIL effectively. Using bi-directional attention with multimodal rotary position embeddings enhances visual perception, while incorporating pure text data preserves essential language capabilities for robust multimodal performance." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 89, + 128, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 128, + 102 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 128, + 102 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 110, + 297, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 110, + 297, + 301 + ], + "spans": [ + { + "bbox": [ + 55, + 110, + 297, + 301 + ], + "type": "text", + "content": "In this work, we conducted an extensive analysis of Single Transformer-based MLLMs compared to modular MLLMs. 
Our investigation explored the unique properties of Single Transformers, including scalability, cross-modal information flow patterns, and visual representation capabilities. A series of experiments on our trained SAIL model demonstrated that this unified architecture achieves performance on vision-language benchmarks comparable to modular MLLMs while also functioning effectively as a vision backbone. Our findings highlight several advantages of Single Transformer architectures, such as superior data scalability, vision-centric information flow, and inherent capabilities as a powerful vision encoder. We hope our empirical findings will inspire further research to refine and enhance Single Transformer architectures, advancing the field of multimodal intelligence." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 313, + 110, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 313, + 110, + 327 + ], + "spans": [ + { + "bbox": [ + 55, + 313, + 110, + 327 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 333, + 295, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 333, + 295, + 357 + ], + "spans": [ + { + "bbox": [ + 55, + 333, + 295, + 357 + ], + "type": "text", + "content": "In the appendix, we provide additional experimental details and results." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 365, + 211, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 365, + 211, + 378 + ], + "spans": [ + { + "bbox": [ + 55, + 365, + 211, + 378 + ], + "type": "text", + "content": "Additional Experimental Details" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 383, + 295, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 383, + 295, + 442 + ], + "spans": [ + { + "bbox": [ + 55, + 383, + 295, + 442 + ], + "type": "text", + "content": "Training Configurations. 
In this section, we provide the corresponding setups for our experiment series in the main paper, including the default setting, the data scaling series, the model scaling series, and ablation experiment settings. The detailed configurations are shown in Table 8." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 443, + 295, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 443, + 295, + 550 + ], + "spans": [ + { + "bbox": [ + 55, + 443, + 295, + 550 + ], + "type": "text", + "content": "Evaluation Configurations. In the main paper, we measure the model performance on several benchmarks: VQAv2 [28], GQA [36], ScienceQA-IMG [52], TextVQA [65], POPE [44], MME [46], MMBench [50], and SEEDBench-IMG [40]. We normalized the performance to a full score of 100 and averaged the performance across these benchmarks to plot the curves shown in Figure 1(A) and Figure 3. The detailed experimental results are shown in Table 9." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 559, + 212, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 559, + 212, + 571 + ], + "spans": [ + { + "bbox": [ + 55, + 559, + 212, + 571 + ], + "type": "text", + "content": "Additional Experimental Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 576, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 576, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 576, + 295, + 694 + ], + "type": "text", + "content": "A comparison of SAIL and LLaVA1.5. In this section, we conduct an experiment to compare SAIL with LLaVA1.5 [47]. In this experiment, our SAIL is trained on 512M image-text pairs in Pretraining Stage 1, followed by fine-tuning on the LLaVA-mix-665K dataset. To fairly compare the performance of the two models, we do not use the anyres strategy during SFT. 
Instead, we adopt the same image processing approach as LLaVA1.5, ensuring that the aspect ratio and number of image tokens are consistent across both models." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "spans": [ + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "type": "text", + "content": "The experimental results are presented in Table 10. Despite our model being trained on only 512M image-text pairs, which is significantly smaller than the CLIP pretrain-" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 91, + 553, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 553, + 150 + ], + "type": "text", + "content": "ing data used in the LLaVA1.5 model, the results show that our model achieves comparable performance to LLaVA1.5 across various benchmarks. Remarkably, our model even outperforms LLaVA1.5 on specific benchmarks such as DocVQA and ChartVQA." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 152, + 555, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 152, + 555, + 236 + ], + "spans": [ + { + "bbox": [ + 313, + 152, + 555, + 236 + ], + "type": "text", + "content": "These findings highlight the strong potential of Single Transformer models in terms of data scaling. Specifically, they suggest that even with a relatively smaller pretraining dataset, Single Transformer models can perform on par with, or even exceed, more extensively trained modular MLLMs like LLaVA1.5 when similar preprocessing and controlled variables are applied." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 236, + 555, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 236, + 555, + 320 + ], + "spans": [ + { + "bbox": [ + 313, + 236, + 555, + 320 + ], + "type": "text", + "content": "Compare SAIL and LLaVA on MMVP. We compare SAIL and LLaVA1.5 [47] on MMVP [70] to dissect the behavior of the two models. The results are shown in Figure 5. From examples (A) and (B), we observe that SAIL performs better in perceiving minor regions and objects. Examples (C) and (D) illustrate that SAIL can more accurately distinguish the states of objects." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 321, + 554, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 321, + 554, + 429 + ], + "spans": [ + { + "bbox": [ + 313, + 321, + 554, + 429 + ], + "type": "text", + "content": "Additional Experiments on Information Flow Pattern Analysis. In the main paper, we analyzed the distribution patterns of image attention scores for different Single Transformer-based MLLMs and modular MLLMs. The results showed that Single Transformer-based MLLMs allocate more attention weights to image tokens. However, this could be due to different models processing varying numbers of image tokens, where more image tokens lead to higher aggregated attention scores." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 429, + 554, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 429, + 554, + 513 + ], + "spans": [ + { + "bbox": [ + 313, + 429, + 554, + 513 + ], + "type": "text", + "content": "To analyze this in a more controlled manner, we designed an additional experiment. Using the data scaling setup at 512M, we pretrained SAIL and its modular MLLM counterpart. After pretraining, we fine-tuned both models using the LLaVA-mix-665K dataset, fixing the resolution size to 224x224 during SFT, instead of using any resolution." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 514, + 554, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 554, + 574 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 554, + 574 + ], + "type": "text", + "content": "The results, shown in Figure 6, reveal that SAIL allocates higher attention scores to image tokens across all transformer layers compared to the modular MLLM, particularly in medium layers " + }, + { + "bbox": [ + 313, + 514, + 554, + 574 + ], + "type": "inline_equation", + "content": "(+43.5\\%)" + }, + { + "bbox": [ + 313, + 514, + 554, + 574 + ], + "type": "text", + "content": " in layer 14) and deep layers " + }, + { + "bbox": [ + 313, + 514, + 554, + 574 + ], + "type": "inline_equation", + "content": "(+41.2\\%)" + }, + { + "bbox": [ + 313, + 514, + 554, + 574 + ], + "type": "text", + "content": " in layer 31)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 574, + 554, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 574, + 554, + 647 + ], + "spans": [ + { + "bbox": [ + 313, + 574, + 554, + 647 + ], + "type": "text", + "content": "From this, we can conclude that Single Transformer-based MLLMs tend to allocate a significant portion of attention to previous image tokens during prediction. In contrast, modular MLLMs allocate a smaller portion of their attention directly to image tokens, indicating a less image-centric approach in their prediction mechanism." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 647, + 554, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 554, + 719 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 554, + 719 + ], + "type": "text", + "content": "Attention Map Visualization. In the main paper, we found that Single Transformer-based MLLMs allocate a large portion of attention weights to image tokens during inference, indicating a more vision-centric model. 
Here, we visualize the attention distribution of SAIL across different regions of the image when predicting tokens." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 326, + 719, + 553, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 719, + 553, + 732 + ], + "spans": [ + { + "bbox": [ + 326, + 719, + 553, + 732 + ], + "type": "text", + "content": "The results in Figure 7 illustrate the attention maps for" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 88, + 553, + 173 + ], + "blocks": [ + { + "bbox": [ + 58, + 88, + 553, + 173 + ], + "lines": [ + { + "bbox": [ + 58, + 88, + 553, + 173 + ], + "spans": [ + { + "bbox": [ + 58, + 88, + 553, + 173 + ], + "type": "table", + "html": "
ExpModelLLMStage 1Stage 2SFT
DataLRDataLRDataLR
Figure 1(A)SAIL, point 32MMistral-7B-v0.1Standard Stage 1 Data (32M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(A)SAIL, point 128MMistral-7B-v0.1Standard Stage 1 Data (128M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(A), Table 6SAIL, point512MMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 1(B), Table 2SAILMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)Standard Stage 2 Data (1e-5, 5e-6)Standard SFT Data(1e-5,0)
Table 3, 4, 5SAILMistral-7B-v0.1Standard Stage 1 Data (512M image-text pairs) (5e-5, 5e-6)----
Figure 3, Table 7SAIL-0.5BQwen2.5-0.5BStandard Stage 1 Data (128M image-text pairs) (5e-4, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 3SAIL-3BQwen2.5-3BStandard Stage 1 Data (128M image-text pairs) (1e-4, 5e-6)--LLaVA-mix-665K(1e-5,0)
Figure 3SAIL-7BMistral-7B-v0.1Standard Stage 1 Data (128M image-text pairs) (5e-5, 5e-6)--LLaVA-mix-665K(1e-5,0)
", + "image_path": "0c8b865553e262229522d49de1c266ad4d74ff2efba965f1aceb91bc26705e7b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 95, + 235, + 517, + 332 + ], + "blocks": [ + { + "bbox": [ + 55, + 179, + 555, + 222 + ], + "lines": [ + { + "bbox": [ + 55, + 179, + 555, + 222 + ], + "spans": [ + { + "bbox": [ + 55, + 179, + 555, + 222 + ], + "type": "text", + "content": "Table 8. Experimental Configurations for Various Settings. The table lists the models used, the specific LLM variants, the datasets, and learning rates (LR) applied during each training stage (Pretraining Stage 1, Pretraining Stage 2, and SFT). \"Standard Stage 1 Data\", \"Standard Stage 2 Data\" and \"Standard SFT Data\" are listed in Table 1. Specific points and tables/figures referred to in the text are also indicated." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 95, + 235, + 517, + 332 + ], + "lines": [ + { + "bbox": [ + 95, + 235, + 517, + 332 + ], + "spans": [ + { + "bbox": [ + 95, + 235, + 517, + 332 + ], + "type": "table", + "html": "
ModelVQAv2GQASciQA-IMGTextVQAPOPEMMEMMBenchSEEDBench-IMGNorm(avg
Figure 1, modular MLLM, 32M76.9658.768.4858.6888.17159969.4470.3161.41
Figure 1, modular MLLM, 128M78.4759.7870.0559.8286.78163868.5768.1161.52
Figure 1, modular MLLM, 512M80.0662.3870.3457.8583.14137970.8269.8361.86
Figure 1, SAIL, 32M70.5157.9563.3231.6781.77142148.2261.5151.93
Figure 1, SAIL, 128M76.3660.9362.6156.8685.5145853.9466.6057.91
Figure 1, SAIL, 512M78.5162.0667.4863.9486.04153056.7168.8360.51
Figure 3, SAIL-3B67.353.263.830.966.9820.844.655.447.80
Figure 3, SAIL-0.5B59.146.959.620.159.8761.4538.535.139.92
", + "image_path": "42cf6443894186feb0a1d9dae9b8337f49f0ae9fad351b32d6fc399fbad551af.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 94, + 362, + 516, + 396 + ], + "blocks": [ + { + "bbox": [ + 201, + 340, + 408, + 351 + ], + "lines": [ + { + "bbox": [ + 201, + 340, + 408, + 351 + ], + "spans": [ + { + "bbox": [ + 201, + 340, + 408, + 351 + ], + "type": "text", + "content": "Table 9. Detailed experimental results in the main paper." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 94, + 362, + 516, + 396 + ], + "lines": [ + { + "bbox": [ + 94, + 362, + 516, + 396 + ], + "spans": [ + { + "bbox": [ + 94, + 362, + 516, + 396 + ], + "type": "table", + "html": "
MethodPretrainSFTVQAv2GQASciQA-IMGTextVQAPOPEMMBenchSEEDbenchDocVQAChartQAAI2DMMStaravg
LLaVA-1.5-336px [47]12.8B+558K665K78.562.066.858.285.964.366.128.118.254.832.458.3
SAIL512M665K77.861.668.056.486.661.369.829.321.558.737.159.1
", + "image_path": "63cbc17f7f3795c8081971b405697dd160c73dc06136bdfd24067ac6eaeeb4cf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 403, + 555, + 427 + ], + "lines": [ + { + "bbox": [ + 55, + 403, + 555, + 427 + ], + "spans": [ + { + "bbox": [ + 55, + 403, + 555, + 427 + ], + "type": "text", + "content": "Table 10. Comparison of SAIL and LLaVA1.5. We evaluate the models on VQAv2 [28], GQA [36], ScienceQA [52], TextVQA [65], POPE [44], MMBench [50], SEEDBench [40], DocVQA [56], ChartQA [55], AI2D [39] and MMStar [9]." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 447, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 447, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 447, + 295, + 555 + ], + "type": "text", + "content": "specific tokens to the image portion when SAIL generates predictions for multimodal queries. The visualizations show that in the early transformer layers, the predicted tokens primarily focus on the salient regions of the image. As the model progresses to deeper layers, the attention shifts to areas more relevant to the predicted tokens. This behavior demonstrates that SAIL has the potential to function as a grounding model, effectively correlating text tokens with their corresponding image regions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 555, + 295, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 555, + 295, + 627 + ], + "spans": [ + { + "bbox": [ + 55, + 555, + 295, + 627 + ], + "type": "text", + "content": "In other words, during inference, the model incrementally concentrates attention weights on relevant regions, aiding in decision-making. This progressive focusing of attention signifies the model's capability to ground text tokens in the corresponding visual context, enhancing its performance in vision-language tasks." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 627, + 295, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 627, + 295, + 698 + ], + "spans": [ + { + "bbox": [ + 55, + 627, + 295, + 698 + ], + "type": "text", + "content": "Visual Understanding Demonstration. We investigate several vision perception and reasoning capabilities of our SAIL. These include its ability to understand rich OCR information (Table 11), interpret real-world scenes (Table 12), comprehend scientific charts (Table 13), and analyze poster contents (Table 14)." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 128, + 187, + 205 + ], + "blocks": [ + { + "bbox": [ + 112, + 128, + 187, + 205 + ], + "lines": [ + { + "bbox": [ + 112, + 128, + 187, + 205 + ], + "spans": [ + { + "bbox": [ + 112, + 128, + 187, + 205 + ], + "type": "image", + "image_path": "eb6a28a28a20b99c926f16b7385ebc1e417e5d659893ab587113a8b6f7597b5d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 209, + 273, + 220 + ], + "lines": [ + { + "bbox": [ + 111, + 209, + 273, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 273, + 220 + ], + "type": "text", + "content": "(A) Are there patterns on the easter eggs?" 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 192, + 129, + 267, + 205 + ], + "blocks": [ + { + "bbox": [ + 192, + 129, + 267, + 205 + ], + "lines": [ + { + "bbox": [ + 192, + 129, + 267, + 205 + ], + "spans": [ + { + "bbox": [ + 192, + 129, + 267, + 205 + ], + "type": "image", + "image_path": "7637efd9ee2d43b4d77ee2d1c219080912e7103759b698c9370945ebd8bfd6db.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 220, + 215, + 228 + ], + "lines": [ + { + "bbox": [ + 170, + 220, + 215, + 228 + ], + "spans": [ + { + "bbox": [ + 170, + 220, + 215, + 228 + ], + "type": "text", + "content": "GT: Yes; No" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 113, + 259, + 189, + 335 + ], + "blocks": [ + { + "bbox": [ + 167, + 229, + 217, + 238 + ], + "lines": [ + { + "bbox": [ + 167, + 229, + 217, + 238 + ], + "spans": [ + { + "bbox": [ + 167, + 229, + 217, + 238 + ], + "type": "text", + "content": "SAIL: Yes; No" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 113, + 259, + 189, + 335 + ], + "lines": [ + { + "bbox": [ + 113, + 259, + 189, + 335 + ], + "spans": [ + { + "bbox": [ + 113, + 259, + 189, + 335 + ], + "type": "image", + "image_path": "a1c62c760920c7d865b90215f1f47dbfe4133eedf1aaf8f9b80698512acfbfef.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 341, + 280, + 352 + ], + "lines": [ + { + "bbox": [ + 97, + 341, + 280, + 352 + ], + "spans": [ + { + "bbox": [ + 97, + 341, + 280, + 352 + ], + "type": "text", + "content": "(C) Are the birds flapping upward or downward?" 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 146, + 352, + 231, + 361 + ], + "lines": [ + { + "bbox": [ + 146, + 352, + 231, + 361 + ], + "spans": [ + { + "bbox": [ + 146, + 352, + 231, + 361 + ], + "type": "text", + "content": "GT: Upward; Downward" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 143, + 361, + 235, + 370 + ], + "lines": [ + { + "bbox": [ + 143, + 361, + 235, + 370 + ], + "spans": [ + { + "bbox": [ + 143, + 361, + 235, + 370 + ], + "type": "text", + "content": "SAIL: Upward; Downward" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 140, + 372, + 238, + 380 + ], + "lines": [ + { + "bbox": [ + 140, + 372, + 238, + 380 + ], + "spans": [ + { + "bbox": [ + 140, + 372, + 238, + 380 + ], + "type": "text", + "content": "LLaVA1.5: Upward; Upward" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 392, + 555, + 415 + ], + "lines": [ + { + "bbox": [ + 55, + 392, + 555, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 392, + 555, + 415 + ], + "type": "text", + "content": "Figure 5. Comparison of SAIL and LLaVA1.5 on MMVP examples. SAIL demonstrates better performance in perceiving minor regions and objects, as well as more accurately distinguishing object states." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 196, + 259, + 271, + 335 + ], + "blocks": [ + { + "bbox": [ + 157, + 239, + 227, + 247 + ], + "lines": [ + { + "bbox": [ + 157, + 239, + 227, + 247 + ], + "spans": [ + { + "bbox": [ + 157, + 239, + 227, + 247 + ], + "type": "text", + "content": "LLaVA1.5: Yes; Yes" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 196, + 259, + 271, + 335 + ], + "lines": [ + { + "bbox": [ + 196, + 259, + 271, + 335 + ], + "spans": [ + { + "bbox": [ + 196, + 259, + 271, + 335 + ], + "type": "image", + "image_path": "d6cdb7ea2a12e15ebee32a5cc29d4041d12e9a7a69f66eeb92347e9b7ed99910.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 326, + 129, + 402, + 205 + ], + "blocks": [ + { + "bbox": [ + 326, + 129, + 402, + 205 + ], + "lines": [ + { + "bbox": [ + 326, + 129, + 402, + 205 + ], + "spans": [ + { + "bbox": [ + 326, + 129, + 402, + 205 + ], + "type": "image", + "image_path": "9064d8a14fd89c407062d6f65d9550a3b3b10c965b254fe31126fd6726721b04.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 288, + 209, + 515, + 220 + ], + "lines": [ + { + "bbox": [ + 288, + 209, + 515, + 220 + ], + "spans": [ + { + "bbox": [ + 288, + 209, + 515, + 220 + ], + "type": "text", + "content": "(B) Are there any words displayed on the vehicle's lightbar?" 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 380, + 220, + 424, + 228 + ], + "lines": [ + { + "bbox": [ + 380, + 220, + 424, + 228 + ], + "spans": [ + { + "bbox": [ + 380, + 220, + 424, + 228 + ], + "type": "text", + "content": "GT: Yes; No" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 406, + 129, + 482, + 205 + ], + "blocks": [ + { + "bbox": [ + 406, + 129, + 482, + 205 + ], + "lines": [ + { + "bbox": [ + 406, + 129, + 482, + 205 + ], + "spans": [ + { + "bbox": [ + 406, + 129, + 482, + 205 + ], + "type": "image", + "image_path": "c743f0fe8f49bafa26967c3602456ccc131b4c41de5f8eefbc16ca5bcc4d4e12.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 328, + 258, + 403, + 334 + ], + "blocks": [ + { + "bbox": [ + 376, + 229, + 427, + 238 + ], + "lines": [ + { + "bbox": [ + 376, + 229, + 427, + 238 + ], + "spans": [ + { + "bbox": [ + 376, + 229, + 427, + 238 + ], + "type": "text", + "content": "SAIL: Yes; No" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 367, + 239, + 436, + 247 + ], + "lines": [ + { + "bbox": [ + 367, + 239, + 436, + 247 + ], + "spans": [ + { + "bbox": [ + 367, + 239, + 436, + 247 + ], + "type": "text", + "content": "LLaVA1.5: Yes; Yes" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 328, + 258, + 403, + 334 + ], + "lines": [ + { + "bbox": [ + 328, + 258, + 403, + 334 + ], + "spans": [ + { + "bbox": [ + 328, + 258, + 403, + 334 + ], + "type": "image", + "image_path": "5cf82c40e2027f2eec837df6873535efaf1d21cde37f0251e7f842f4a80e5417.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 341, + 492, + 352 + ], + "lines": [ + { + "bbox": [ + 320, + 341, + 492, + 352 + ], + "spans": [ + { + 
"bbox": [ + 320, + 341, + 492, + 352 + ], + "type": "text", + "content": "(D) Is the elephant's trunk raised or lowered?" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 369, + 352, + 444, + 361 + ], + "lines": [ + { + "bbox": [ + 369, + 352, + 444, + 361 + ], + "spans": [ + { + "bbox": [ + 369, + 352, + 444, + 361 + ], + "type": "text", + "content": "GT: Raised; Lowered" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 365, + 361, + 447, + 370 + ], + "lines": [ + { + "bbox": [ + 365, + 361, + 447, + 370 + ], + "spans": [ + { + "bbox": [ + 365, + 361, + 447, + 370 + ], + "type": "text", + "content": "SAIL: Raised; Lowered" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 354, + 371, + 459, + 380 + ], + "lines": [ + { + "bbox": [ + 354, + 371, + 459, + 380 + ], + "spans": [ + { + "bbox": [ + 354, + 371, + 459, + 380 + ], + "type": "text", + "content": "LLaVA1.5: Lowered; Lowered" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 406, + 258, + 482, + 334 + ], + "blocks": [ + { + "bbox": [ + 406, + 258, + 482, + 334 + ], + "lines": [ + { + "bbox": [ + 406, + 258, + 482, + 334 + ], + "spans": [ + { + "bbox": [ + 406, + 258, + 482, + 334 + ], + "type": "image", + "image_path": "d74c79b2594ba8ce53efc20e4751a99dfed385f78b166c083f55da888145dbd3.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 156, + 477, + 455, + 654 + ], + "blocks": [ + { + "bbox": [ + 156, + 477, + 455, + 654 + ], + "lines": [ + { + "bbox": [ + 156, + 477, + 455, + 654 + ], + "spans": [ + { + "bbox": [ + 156, + 477, + 455, + 654 + ], + "type": "image", + "image_path": "86ea22e9302f393c6faa266a08d690c1d93155388ab664b38df9f56bb5bac5f9.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 55, + 663, + 555, + 696 + ], + "lines": [ + { + "bbox": [ + 55, + 663, + 555, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 663, + 555, + 696 + ], + "type": "text", + "content": "Figure 6. Image attention score allocation for SAIL and its modular MLLM counterpart. We compared the attention score allocation distribution for shallow layers, medium layers, and deep layers between these two models. The Single Transformer-based MLLM model significantly allocates a higher proportion of attention score to image tokens during prediction than the modular MLLM." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 200, + 220, + 406, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 220, + 406, + 232 + ], + "spans": [ + { + "bbox": [ + 200, + 220, + 406, + 232 + ], + "type": "text", + "content": "Query: When was the travel agency founded?" 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 137, + 237, + 302, + 345 + ], + "blocks": [ + { + "bbox": [ + 137, + 237, + 302, + 345 + ], + "lines": [ + { + "bbox": [ + 137, + 237, + 302, + 345 + ], + "spans": [ + { + "bbox": [ + 137, + 237, + 302, + 345 + ], + "type": "image", + "image_path": "72403d5df5d4cd1878d5e33c5bd4d9fa9dd848225e493287b525c8fda5a85e1d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 346, + 271, + 357 + ], + "lines": [ + { + "bbox": [ + 170, + 346, + 271, + 357 + ], + "spans": [ + { + "bbox": [ + 170, + 346, + 271, + 357 + ], + "type": "text", + "content": "Output token: \"2\", Layer 2" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 237, + 473, + 345 + ], + "blocks": [ + { + "bbox": [ + 309, + 237, + 473, + 345 + ], + "lines": [ + { + "bbox": [ + 309, + 237, + 473, + 345 + ], + "spans": [ + { + "bbox": [ + 309, + 237, + 473, + 345 + ], + "type": "image", + "image_path": "ca7b4d2d7629b3566f0d32cb2b1bd1e264daec4f37ae471c87d0ab3de85774dd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 340, + 346, + 446, + 357 + ], + "lines": [ + { + "bbox": [ + 340, + 346, + 446, + 357 + ], + "spans": [ + { + "bbox": [ + 340, + 346, + 446, + 357 + ], + "type": "text", + "content": "Output token: “2”, Layer 25" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 228, + 382, + 378, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 382, + 378, + 393 + ], + "spans": [ + { + "bbox": [ + 228, + 382, + 378, + 393 + ], + "type": "text", + "content": "Query: What color is the Bicycle?" 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 154, + 396, + 298, + 553 + ], + "blocks": [ + { + "bbox": [ + 154, + 396, + 298, + 553 + ], + "lines": [ + { + "bbox": [ + 154, + 396, + 298, + 553 + ], + "spans": [ + { + "bbox": [ + 154, + 396, + 298, + 553 + ], + "type": "image", + "image_path": "14245205aa410e48bd6a91506bad91fa760f5c60d89237bfedd1e75ea248e219.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 555, + 282, + 565 + ], + "lines": [ + { + "bbox": [ + 173, + 555, + 282, + 565 + ], + "spans": [ + { + "bbox": [ + 173, + 555, + 282, + 565 + ], + "type": "text", + "content": "Output token: \"red\", Layer 0" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 313, + 396, + 457, + 553 + ], + "blocks": [ + { + "bbox": [ + 313, + 396, + 457, + 553 + ], + "lines": [ + { + "bbox": [ + 313, + 396, + 457, + 553 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 457, + 553 + ], + "type": "image", + "image_path": "45ceef6727c7a3c12e1dcef96d22949a055822dcf3dcdc91bdcc1b96cfdab9b9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 555, + 443, + 565 + ], + "lines": [ + { + "bbox": [ + 331, + 555, + 443, + 565 + ], + "spans": [ + { + "bbox": [ + 331, + 555, + 443, + 565 + ], + "type": "text", + "content": "Output token: \"red\", Layer 31" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 582, + 555, + 615 + ], + "lines": [ + { + "bbox": [ + 55, + 582, + 555, + 615 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 555, + 615 + ], + "type": "text", + "content": "Figure 7. Visualization of SAIL's attention distribution across image regions during token prediction. In early transformer layers, attention primarily focuses on the salient regions of the image. 
As the model progresses to deeper layers, attention shifts to areas more relevant to the predicted tokens." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 243, + 311, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 243, + 311, + 255 + ], + "spans": [ + { + "bbox": [ + 75, + 243, + 311, + 255 + ], + "type": "text", + "content": "Example 1: Understanding OCR Information in Webpage." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 109, + 265, + 475, + 403 + ], + "blocks": [ + { + "bbox": [ + 109, + 265, + 475, + 403 + ], + "lines": [ + { + "bbox": [ + 109, + 265, + 475, + 403 + ], + "spans": [ + { + "bbox": [ + 109, + 265, + 475, + 403 + ], + "type": "image", + "image_path": "20f7c01d41fb79fbaccfe67a2b0667fe76827939bc0931c034efc63a00dd6df2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 85, + 405, + 231, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 405, + 231, + 415 + ], + "spans": [ + { + "bbox": [ + 85, + 405, + 231, + 415 + ], + "type": "text", + "content": "User Please output all the text information." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 418, + 520, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 418, + 520, + 544 + ], + "spans": [ + { + "bbox": [ + 85, + 418, + 520, + 544 + ], + "type": "text", + "content": "SAIL Cornell University arXiv cs arXiv:2304.08485 Computer Science Computer Vision and Pattern Recognition Visual Instruction Tuning HaoTian Liu, Chunyuan Li, Qingyang Wu, Yong Jae Lee Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLM for general-purpose visual and language visual and language this http URL URL Early Experiments: LLMs demonstrate impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLAVA and GPT-4 achieves a new state-of-the-art accuracy of " + }, + { + "bbox": [ + 85, + 418, + 520, + 544 + ], + "type": "inline_equation", + "content": "92.53\\%" + }, + { + "bbox": [ + 85, + 418, + 520, + 544 + ], + "type": "text", + "content": ". We make GPT-4 generated visual instruction tuning data, our model and code base publicly available. 
Comments: NeurIPS 2023 Oral; project page: this https URL Subjects: Computer Vision and Pattern Recognition (cs.CV); Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Machine Learning (cs.LG) Cite as: arXiv:2304.08485 [cs.CV] (or arXiv:2304.08485v2 [cs.CV] for this version) https://doi.org/10.48550/arXiv.2304.08485 Submission history From: Haotian Liu [view email] [v1] Mon, 17 Apr 2023 17:59:25 UTC (4,360 KB) [v2] Mon, 11 Dec 2023 17:46:14 UTC (4,985 KB)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 568, + 473, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 473, + 579 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 473, + 579 + ], + "type": "text", + "content": "Table 11. SAIL demonstrates a strong capability to extract information in OCR-rich scenarios." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 194, + 261, + 206 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 194, + 261, + 206 + ], + "spans": [ + { + "bbox": [ + 76, + 194, + 261, + 206 + ], + "type": "text", + "content": "Example 2: Understanding Real-World Scene." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 110, + 215, + 474, + 451 + ], + "blocks": [ + { + "bbox": [ + 110, + 215, + 474, + 451 + ], + "lines": [ + { + "bbox": [ + 110, + 215, + 474, + 451 + ], + "spans": [ + { + "bbox": [ + 110, + 215, + 474, + 451 + ], + "type": "image", + "image_path": "cbf681ad40eed4080496a59ec1cab9482e6085a739b47fb697bc4e5fa99eabde.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 453, + 223, + 463 + ], + "lines": [ + { + "bbox": [ + 85, + 453, + 223, + 463 + ], + "spans": [ + { + "bbox": [ + 85, + 453, + 223, + 463 + ], + "type": "text", + "content": "User Please describe the image in detail." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 85, + 467, + 265, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 467, + 265, + 476 + ], + "spans": [ + { + "bbox": [ + 85, + 467, + 265, + 476 + ], + "type": "text", + "content": "SAIL There are several notable elements in the image:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 476, + 520, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 476, + 520, + 591 + ], + "spans": [ + { + "bbox": [ + 107, + 476, + 520, + 591 + ], + "type": "text", + "content": "1. The highway is multi-lane, with multiple lanes visible. 2. The traffic is dense, with vehicles in various lanes. 3. The weather seems to be overcast, as indicated by the grayish tones of the image. 4. There are several traffic signs and signs visible, including a green highway sign indicating \"1/2 mile\" to the right, and a green sign with the number \"16\" and the word \"MILE\" on the left side of the image. 5. The background includes a hill or mountain, with trees and greenery visible. 6. There are also several power lines and electrical pylons visible along the sides of the highway. 
The image's focus is on the traffic and the surrounding environment. The vehicles are primarily in the center and right lanes of the highway, with some vehicles in the left lanes as well. The highway is flanked by lush greenery, including trees and grassy areas, suggesting that this is a rural or possibly a suburban area. The traffic appears to be moving at a moderate pace. The weather conditions suggest that this is a daytime scene, possibly during the morning or late afternoon. The image is taken from a perspective that suggests the viewer is in a vehicle on the highway, observing the traffic ahead. In summary, the image captures a typical highway scene with moderate traffic, surrounded by a mix of natural and built environments, under an overcast sky. The road is in good condition, and the vehicles are traveling smoothly in both directions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 617, + 466, + 628 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 617, + 466, + 628 + ], + "spans": [ + { + "bbox": [ + 138, + 617, + 466, + 628 + ], + "type": "text", + "content": "Table 12. SAIL demonstrates a strong capability about understanding the real-world scene." + } + ] + } + ], + "index": 5, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 185, + 271, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 185, + 271, + 198 + ], + "spans": [ + { + "bbox": [ + 75, + 185, + 271, + 198 + ], + "type": "text", + "content": "Example 3: Understanding the Scientific Charts." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 110, + 206, + 291, + 310 + ], + "blocks": [ + { + "bbox": [ + 110, + 206, + 291, + 310 + ], + "lines": [ + { + "bbox": [ + 110, + 206, + 291, + 310 + ], + "spans": [ + { + "bbox": [ + 110, + 206, + 291, + 310 + ], + "type": "image", + "image_path": "d4d1f138be91e3ae0a2bdd84b923df543b8646ec19b28b01a2a51525de73dedb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 111, + 311, + 225, + 361 + ], + "blocks": [ + { + "bbox": [ + 111, + 311, + 225, + 361 + ], + "lines": [ + { + "bbox": [ + 111, + 311, + 225, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 311, + 225, + 361 + ], + "type": "image", + "image_path": "61e34a4823cfcf7abfbd05b2e685db45e3739a868869559e15d3920fe25b1d5f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 228, + 311, + 290, + 361 + ], + "blocks": [ + { + "bbox": [ + 228, + 311, + 290, + 361 + ], + "lines": [ + { + "bbox": [ + 228, + 311, + 290, + 361 + ], + "spans": [ + { + "bbox": [ + 228, + 311, + 290, + 361 + ], + "type": "image", + "image_path": "6e114755f6e4fbd24b51e09e065d2fd1a5dd224fab94ab7cfcd565e63799f6e7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 111, + 361, + 211, + 474 + ], + "blocks": [ + { + "bbox": [ + 111, + 361, + 211, + 474 + ], + "lines": [ + { + "bbox": [ + 111, + 361, + 211, + 474 + ], + "spans": [ + { + "bbox": [ + 111, + 361, + 211, + 474 + ], + "type": "image", + "image_path": "2aa1f308a5e4d1cfb777d32372c4c543ebda75253e771dd662da3c40a4a9b1bf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 481, + 224, + 491 + ], + "lines": [ + { + "bbox": [ + 85, + 481, + 224, + 491 + ], + "spans": [ + { + "bbox": [ + 85, + 481, + 224, + 491 + ], + "type": 
"text", + "content": "User Please describe the image in detail." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 85, + 495, + 520, + 601 + ], + "lines": [ + { + "bbox": [ + 85, + 495, + 520, + 601 + ], + "spans": [ + { + "bbox": [ + 85, + 495, + 520, + 601 + ], + "type": "text", + "content": "This image is a collage of various objects, foods, and animals, all of which are predominantly blue or have blue elements. The image is divided into sections labeled with letters from A to O. Here is a detailed description of each section: 1. A - Pumpkin: A blue pumpkin is depicted, which is a common symbol associated with autumn and Halloween. 2. B - Fish: A blue fish is shown, which could be a representation of a common aquatic animal. 3. C - M&M: A blue M&M candy is present.4. D - Milk and Soap: A glass of milk, a bar of soap, and a blue sponge.5. E - Bread and Jelly Beans: Blue wheat, a blue loaf of bread, and blue jelly beans.6. F - Castor Oil: A bottle of castor oil.7. G - Cheerios and Snacks: A box of Cheerios, a glass of a dark blue drink, a blue popsicle, a blue donut, and a blue muffin. 8. H - Vegetables: A collection of vegetables including a blue cucumber, green leafy vegetables, and a bunch of blue carrots.9. I - Apple and Stones: A green apple, a blue candy, and blue stones.10. J - Syrup and Candy: Sugar cubes, a bottle of blue syrup, and a blue candy bar.11. K - Coconut and Aloe: A blue coconut, a bottle of blue liquid, and a blue aloe vera plant.12. L - Pineapple and Juice: A blue pineapple, a blue banana, and a glass of blue juice.13. M - Candy Bar: A blue candy bar.14. N - Blueberries: Blueberries.15. O - Oval Object: A blue oval-shaped object." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 212, + 361, + 291, + 474 + ], + "blocks": [ + { + "bbox": [ + 212, + 361, + 291, + 474 + ], + "lines": [ + { + "bbox": [ + 212, + 361, + 291, + 474 + ], + "spans": [ + { + "bbox": [ + 212, + 361, + 291, + 474 + ], + "type": "image", + "image_path": "88ca0006929491eddb21a3c71ddbb465d70cdc0896585136bc6f707c683132d0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 292, + 206, + 474, + 285 + ], + "blocks": [ + { + "bbox": [ + 292, + 206, + 474, + 285 + ], + "lines": [ + { + "bbox": [ + 292, + 206, + 474, + 285 + ], + "spans": [ + { + "bbox": [ + 292, + 206, + 474, + 285 + ], + "type": "image", + "image_path": "7f811cf5851728d6bfd1446690b26f8821065017714d04d06116c144ae5224a7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 292, + 285, + 383, + 352 + ], + "blocks": [ + { + "bbox": [ + 292, + 285, + 383, + 352 + ], + "lines": [ + { + "bbox": [ + 292, + 285, + 383, + 352 + ], + "spans": [ + { + "bbox": [ + 292, + 285, + 383, + 352 + ], + "type": "image", + "image_path": "5c84f7550aeaf587e504883e32f2563a103b1875b315867b44936956a2766de4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 383, + 285, + 472, + 352 + ], + "blocks": [ + { + "bbox": [ + 383, + 285, + 472, + 352 + ], + "lines": [ + { + "bbox": [ + 383, + 285, + 472, + 352 + ], + "spans": [ + { + "bbox": [ + 383, + 285, + 472, + 352 + ], + "type": "image", + "image_path": "c3f078f825632a0a8ecfc0f90209ccb5bb4ce6fa98f6b3253c233d20f4b9b7d4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 292, + 353, + 472, + 407 + ], + "blocks": [ + { + 
"bbox": [ + 292, + 353, + 472, + 407 + ], + "lines": [ + { + "bbox": [ + 292, + 353, + 472, + 407 + ], + "spans": [ + { + "bbox": [ + 292, + 353, + 472, + 407 + ], + "type": "image", + "image_path": "476bc9f3b006d0369e3d41f7a4f60b0559aa33a82c981e4c5ddb4b3946ce37ed.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 292, + 407, + 473, + 479 + ], + "blocks": [ + { + "bbox": [ + 292, + 407, + 473, + 479 + ], + "lines": [ + { + "bbox": [ + 292, + 407, + 473, + 479 + ], + "spans": [ + { + "bbox": [ + 292, + 407, + 473, + 479 + ], + "type": "image", + "image_path": "957eb719d56772c3ecd3d2e4f2915db952b9755d843d25088697d3eafffc2835.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 139, + 626, + 465, + 637 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 626, + 465, + 637 + ], + "spans": [ + { + "bbox": [ + 139, + 626, + 465, + 637 + ], + "type": "text", + "content": "Table 13. SAIL demonstrates a strong capability about understanding the scientific charts." + } + ] + } + ], + "index": 13, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 205, + 248, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 205, + 248, + 218 + ], + "spans": [ + { + "bbox": [ + 75, + 205, + 248, + 218 + ], + "type": "text", + "content": "Example 4: Understanding Poster Content." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 110, + 224, + 362, + 422 + ], + "blocks": [ + { + "bbox": [ + 110, + 224, + 362, + 422 + ], + "lines": [ + { + "bbox": [ + 110, + 224, + 362, + 422 + ], + "spans": [ + { + "bbox": [ + 110, + 224, + 362, + 422 + ], + "type": "image", + "image_path": "fd5dd7896f1375e93dd6ec6d4a24135dfe379dae2d45342a2a5c556982984744.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 423, + 223, + 432 + ], + "lines": [ + { + "bbox": [ + 85, + 423, + 223, + 432 + ], + "spans": [ + { + "bbox": [ + 85, + 423, + 223, + 432 + ], + "type": "text", + "content": "User Please describe the image in detail." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 85, + 436, + 520, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 436, + 520, + 571 + ], + "spans": [ + { + "bbox": [ + 85, + 436, + 520, + 571 + ], + "type": "text", + "content": "SAIL At the top of the image, there is a decorative border with a pattern of hanging golden ornaments, which adds a festive and celebratory feel to the overall design. The background is plain white, which helps the colorful elements stand out. In the center of the image, the text \"Happy Sankranti\" is prominently displayed in a large, elegant, and stylized font. The text is in a warm orange color, which is traditionally associated with the festival. Below the text, there is a depiction of a woman dressed in traditional Indian attire, likely a saree, in shades of blue and green. She is engaged in the act of preparing a traditional Sankranti sweet, which is a common practice during the festival. The sweet is shaped like a pyramid and is decorated with intricate patterns, reflecting the artistic and cultural heritage of the region. 
To the left of the woman, there is a three-tiered pot, known as a \"patala\", which is a traditional Indian cooking vessel. The pot is adorned with a colorful design and is placed on a colorful circular mat, which is also a traditional element in Indian households. On the right side of the woman, there is a small hut with a thatched roof, which is a common architectural style in rural Indian villages. The hut is depicted in a simple and rustic manner, emphasizing the rural and traditional aspect of the scene. Overall, the image captures the essence of the Sankranti festival, highlighting the cultural and religious aspects of the celebration. The use of traditional clothing, the preparation of traditional sweets, and the depiction of a rural village scene all contribute to a sense of cultural authenticity and celebration." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 605, + 461, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 605, + 461, + 616 + ], + "spans": [ + { + "bbox": [ + 143, + 605, + 461, + 616 + ], + "type": "text", + "content": "Table 14. SAIL demonstrates a strong capability about understanding the poster content." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 90, + 115, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 90, + 115, + 102 + ], + "spans": [ + { + "bbox": [ + 56, + 90, + 115, + 102 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 110, + 295, + 731 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 61, + 110, + 294, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 110, + 294, + 174 + ], + "spans": [ + { + "bbox": [ + 61, + 110, + 294, + 174 + ], + "type": "text", + "content": "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 176, + 294, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 176, + 294, + 197 + ], + "spans": [ + { + "bbox": [ + 61, + 176, + 294, + 197 + ], + "type": "text", + "content": "[2] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 2024. 
1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 198, + 295, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 198, + 295, + 252 + ], + "spans": [ + { + "bbox": [ + 62, + 198, + 295, + 252 + ], + "type": "text", + "content": "[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023.1, 2, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 255, + 294, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 255, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 62, + 255, + 294, + 285 + ], + "type": "text", + "content": "[4] Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021.3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 288, + 294, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 288, + 294, + 320 + ], + "spans": [ + { + "bbox": [ + 62, + 288, + 294, + 320 + ], + "type": "text", + "content": "[5] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Sagnak Tasirlar. Introducing our multimodal models, 2023. 2, 5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 322, + 294, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 322, + 294, + 376 + ], + "spans": [ + { + "bbox": [ + 62, + 322, + 294, + 376 + ], + "type": "text", + "content": "[6] Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12m: Pushing web-scale image-text pretraining to recognize long-tail visual concepts. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3558-3568, 2021. 
3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 377, + 294, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 377, + 294, + 441 + ], + "spans": [ + { + "bbox": [ + 62, + 377, + 294, + 441 + ], + "type": "text", + "content": "[7] Jun Chen, Deyao Zhu, Xiaogian Shen, Xiang Li, Zechun Liu, Pengchuan Zhang, Raghuraman Krishnamoorthi, Vikas Chandra, Yunyang Xiong, and Mohamed Elhoseiny. Minigpt-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 444, + 294, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 444, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 62, + 444, + 294, + 498 + ], + "type": "text", + "content": "[8] Jieneng Chen, Qihang Yu, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Vitamin: Designing scalable vision models in the vision-language era. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 499, + 294, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 499, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 62, + 499, + 294, + 552 + ], + "type": "text", + "content": "[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 5, 6, 10" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 555, + 294, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 555, + 294, + 609 + ], + "spans": [ + { + "bbox": [ + 57, + 555, + 294, + 609 + ], + "type": "text", + "content": "[10] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. 
An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. In European Conference on Computer Vision, pages 19-35. Springer, 2024. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 610, + 294, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 610, + 294, + 653 + ], + "spans": [ + { + "bbox": [ + 57, + 610, + 294, + 653 + ], + "type": "text", + "content": "[11] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. Transactions on Machine Learning Research, 2024. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 655, + 294, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 655, + 294, + 696 + ], + "spans": [ + { + "bbox": [ + 57, + 655, + 294, + 696 + ], + "type": "text", + "content": "[12] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 5, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 700, + 294, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 700, + 294, + 731 + ], + "spans": [ + { + "bbox": [ + 57, + 700, + 294, + 731 + ], + "type": "text", + "content": "[13] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? 
closing" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 92, + 553, + 731 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 335, + 92, + 553, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 92, + 553, + 123 + ], + "spans": [ + { + "bbox": [ + 335, + 92, + 553, + 123 + ], + "type": "text", + "content": "the gap to commercial multimodal models with open-source suites. Science China Information Sciences, 67(12):220101, 2024. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 125, + 553, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 125, + 553, + 190 + ], + "spans": [ + { + "bbox": [ + 316, + 125, + 553, + 190 + ], + "type": "text", + "content": "[14] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 2, 3, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 192, + 553, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 192, + 553, + 266 + ], + "spans": [ + { + "bbox": [ + 317, + 192, + 553, + 266 + ], + "type": "text", + "content": "[15] Mehdi Cherti, Romain Beaumont, Ross Wightman, Mitchell Wortsman, Gabriel Ilharco, Cade Gordon, Christoph Schuhmann, Ludwig Schmidt, and Jenia Jitsev. Reproducible scaling laws for contrastive language-image learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2818-2829, 2023. 
1, 2, 3, 6, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 269, + 553, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 269, + 553, + 323 + ], + "spans": [ + { + "bbox": [ + 317, + 269, + 553, + 323 + ], + "type": "text", + "content": "[16] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhang-hao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yong-hao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. Vicuna: An open-source chatbot impressing gpt-4 with " + }, + { + "bbox": [ + 317, + 269, + 553, + 323 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 317, + 269, + 553, + 323 + ], + "type": "text", + "content": " * chatgpt quality, 2023. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 324, + 553, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 324, + 553, + 367 + ], + "spans": [ + { + "bbox": [ + 317, + 324, + 553, + 367 + ], + "type": "text", + "content": "[17] MMSegmentation Contributors. MMSegmentation: Openmmlab semantic segmentation toolbox and benchmark. https://github.com/open-mmlab/mmsegmentation, 2020.5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 369, + 553, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 369, + 553, + 432 + ], + "spans": [ + { + "bbox": [ + 317, + 369, + 553, + 432 + ], + "type": "text", + "content": "[18] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 434, + 553, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 434, + 553, + 499 + ], + "spans": [ + { + "bbox": [ + 317, + 434, + 553, + 499 + ], + "type": "text", + "content": "[19] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 501, + 553, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 501, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 317, + 501, + 553, + 544 + ], + "type": "text", + "content": "[20] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 3, 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 545, + 553, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 545, + 553, + 598 + ], + "spans": [ + { + "bbox": [ + 317, + 545, + 553, + 598 + ], + "type": "text", + "content": "[21] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. In Advances in Neural Information Processing Systems, pages 52545-52567. Curran Associates, Inc., 2024. 
1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 600, + 553, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 600, + 553, + 654 + ], + "spans": [ + { + "bbox": [ + 317, + 600, + 553, + 654 + ], + "type": "text", + "content": "[22] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Evev2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025. 5, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 656, + 553, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 656, + 553, + 731 + ], + "spans": [ + { + "bbox": [ + 317, + 656, + 553, + 731 + ], + "type": "text", + "content": "[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 
3" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 310, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 759 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 759 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 731 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 91, + 296, + 157 + ], + "spans": [ + { + "bbox": [ + 56, + 91, + 296, + 157 + ], + "type": "text", + "content": "[24] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 159, + 296, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 159, + 296, + 213 + ], + "spans": [ + { + "bbox": [ + 56, + 159, + 296, + 213 + ], + "type": "text", + "content": "[25] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pretraining of large autoregressive image models. arXiv preprint arXiv:2401.08541, 2024. 
5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 215, + 296, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 215, + 296, + 280 + ], + "spans": [ + { + "bbox": [ + 56, + 215, + 296, + 280 + ], + "type": "text", + "content": "[26] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. Eva: Exploring the limits of masked visual representation learning at scale. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 19358-19369, 2023. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 282, + 296, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 282, + 296, + 348 + ], + "spans": [ + { + "bbox": [ + 56, + 282, + 296, + 348 + ], + "type": "text", + "content": "[27] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. Datacomp: In search of the next generation of multimodal datasets. Advances in Neural Information Processing Systems, 36:27092-27112, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 350, + 296, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 350, + 296, + 415 + ], + "spans": [ + { + "bbox": [ + 56, + 350, + 296, + 415 + ], + "type": "text", + "content": "[28] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 
8, 9, 10" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 417, + 296, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 417, + 296, + 472 + ], + "spans": [ + { + "bbox": [ + 56, + 417, + 296, + 472 + ], + "type": "text", + "content": "[29] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 4, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 474, + 296, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 474, + 296, + 550 + ], + "spans": [ + { + "bbox": [ + 56, + 474, + 296, + 550 + ], + "type": "text", + "content": "[30] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024. 5, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 552, + 296, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 552, + 296, + 605 + ], + "spans": [ + { + "bbox": [ + 56, + 552, + 296, + 605 + ], + "type": "text", + "content": "[31] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 
3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 608, + 296, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 608, + 296, + 662 + ], + "spans": [ + { + "bbox": [ + 56, + 608, + 296, + 662 + ], + "type": "text", + "content": "[32] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 664, + 296, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 664, + 296, + 708 + ], + "spans": [ + { + "bbox": [ + 56, + 664, + 296, + 708 + ], + "type": "text", + "content": "[33] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. arXiv preprint arXiv:2312.08914, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 710, + 296, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 710, + 296, + 731 + ], + "spans": [ + { + "bbox": [ + 56, + 710, + 296, + 731 + ], + "type": "text", + "content": "[34] Xinyu Huang, Youcai Zhang, Jinyu Ma, Weiwei Tian, Rui Feng, Yuejie Zhang, Yaqian Li, Yandong Guo, and Lei" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 91, + 553, + 730 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 333, + 91, + 553, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 91, + 553, + 113 + ], + "spans": [ + { + "bbox": [ + 333, + 91, + 553, + 113 + ], + "type": "text", + "content": "Zhang. Tag2text: Guiding vision-language model via image tagging. arXiv preprint arXiv:2303.05657, 2023. 
3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 316, + 114, + 553, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 114, + 553, + 146 + ], + "spans": [ + { + "bbox": [ + 316, + 114, + 553, + 146 + ], + "type": "text", + "content": "[35] Zilong Huang, Qinghao Ye, Bingyi Kang, Jiashi Feng, and Haoqi Fan. Classification done right for vision-language pretraining. In NeurIPS, 2024. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 148, + 553, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 148, + 553, + 201 + ], + "spans": [ + { + "bbox": [ + 316, + 148, + 553, + 201 + ], + "type": "text", + "content": "[36] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019. 8, 9, 10" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 202, + 553, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 202, + 553, + 266 + ], + "spans": [ + { + "bbox": [ + 316, + 202, + 553, + 266 + ], + "type": "text", + "content": "[37] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 269, + 553, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 269, + 553, + 322 + ], + "spans": [ + { + "bbox": [ + 316, + 269, + 553, + 322 + ], + "type": "text", + "content": "[38] Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 
Mistral 7b. arXiv preprint arXiv:2310.06825, 2023. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 323, + 553, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 323, + 553, + 356 + ], + "spans": [ + { + "bbox": [ + 316, + 323, + 553, + 356 + ], + "type": "text", + "content": "[39] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016. 5, 6, 10" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 357, + 553, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 357, + 553, + 399 + ], + "spans": [ + { + "bbox": [ + 316, + 357, + 553, + 399 + ], + "type": "text", + "content": "[40] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 5, 6, 8, 9, 10" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 401, + 553, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 401, + 553, + 444 + ], + "spans": [ + { + "bbox": [ + 316, + 401, + 553, + 444 + ], + "type": "text", + "content": "[41] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 3, 5, 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 445, + 553, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 445, + 553, + 466 + ], + "spans": [ + { + "bbox": [ + 316, + 445, + 553, + 466 + ], + "type": "text", + "content": "[42] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for clip training. In NeurIPS, 2023. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 468, + 553, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 468, + 553, + 521 + ], + "spans": [ + { + "bbox": [ + 316, + 468, + 553, + 521 + ], + "type": "text", + "content": "[43] Xianhang Li, Haoqin Tu, Mude Hui, Zeyu Wang, Bingchen Zhao, Junfei Xiao, Sucheng Ren, Jieru Mei, Qing Liu, Huangjie Zheng, Yuyin Zhou, and Cihang Xie. What if we recapture billions of web images with llama-3? arXiv preprint arXiv:2406.08478, 2024. 4, 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 523, + 553, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 523, + 553, + 565 + ], + "spans": [ + { + "bbox": [ + 316, + 523, + 553, + 565 + ], + "type": "text", + "content": "[44] Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 6, 9, 10" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 567, + 553, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 567, + 553, + 620 + ], + "spans": [ + { + "bbox": [ + 316, + 567, + 553, + 620 + ], + "type": "text", + "content": "[45] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23390-23400, 2023. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 622, + 553, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 622, + 553, + 676 + ], + "spans": [ + { + "bbox": [ + 316, + 622, + 553, + 676 + ], + "type": "text", + "content": "[46] Zijing Liang, Yanjie Xu, Yifan Hong, Penghui Shang, Qi Wang, Qiang Fu, and Ke Liu. A survey of multimodel large language models. 
In Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, pages 405-409, 2024. 5, 6, 8, 9" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 677, + 553, + 730 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 677, + 553, + 730 + ], + "spans": [ + { + "bbox": [ + 316, + 677, + 553, + 730 + ], + "type": "text", + "content": "[47] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 6, 7, 9, 10" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 91, + 295, + 731 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 56, + 91, + 294, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 91, + 294, + 125 + ], + "spans": [ + { + "bbox": [ + 56, + 91, + 294, + 125 + ], + "type": "text", + "content": "[48] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava next: Improved reasoning,OCR, and world knowledge, 2024.3,6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 126, + 294, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 126, + 294, + 158 + ], + "spans": [ + { + "bbox": [ + 56, + 126, + 294, + 158 + ], + "type": "text", + "content": "[49] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. 
Advances in neural information processing systems, 36, 2024. 1, 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 159, + 294, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 159, + 294, + 215 + ], + "spans": [ + { + "bbox": [ + 56, + 159, + 294, + 215 + ], + "type": "text", + "content": "[50] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2024. 5, 6, 8, 9, 10" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 216, + 294, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 216, + 294, + 270 + ], + "spans": [ + { + "bbox": [ + 56, + 216, + 294, + 270 + ], + "type": "text", + "content": "[51] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024. 5, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 271, + 294, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 271, + 294, + 336 + ], + "spans": [ + { + "bbox": [ + 56, + 271, + 294, + 336 + ], + "type": "text", + "content": "[52] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 
5, 6, 8, 9, 10" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 338, + 294, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 338, + 294, + 392 + ], + "spans": [ + { + "bbox": [ + 56, + 338, + 294, + 392 + ], + "type": "text", + "content": "[53] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 394, + 294, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 394, + 294, + 449 + ], + "spans": [ + { + "bbox": [ + 56, + 394, + 294, + 449 + ], + "type": "text", + "content": "[54] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jiawen Liu, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the boundaries of monolithic multimodal large language models with endogenous visual pre-training. arXiv preprint arXiv:2410.08202, 2024. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 450, + 294, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 450, + 294, + 494 + ], + "spans": [ + { + "bbox": [ + 56, + 450, + 294, + 494 + ], + "type": "text", + "content": "[55] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 10" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 495, + 295, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 495, + 295, + 538 + ], + "spans": [ + { + "bbox": [ + 56, + 495, + 295, + 538 + ], + "type": "text", + "content": "[56] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. 
In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 10" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 540, + 294, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 540, + 294, + 605 + ], + "spans": [ + { + "bbox": [ + 56, + 540, + 294, + 605 + ], + "type": "text", + "content": "[57] Sachin Mehta, Maxwell Horton, Fartash Faghri, Mohammad Hossein Sekhavat, Mahyar Najibi, Mehrdad Farajtabar, Oncel Tuzel, and Mohammad Rastegari. Catlip: Clipsevel visual recognition accuracy with 2.7 x faster pretraining on web-scale image-text data. arXiv preprint arXiv:2404.15653, 2024. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 607, + 294, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 607, + 294, + 628 + ], + "spans": [ + { + "bbox": [ + 56, + 607, + 294, + 628 + ], + "type": "text", + "content": "[58] Meta. Introducing meta llama 3: The most capable openly available llm to date, 2024. Accessed: 2024-04-18. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 630, + 261, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 630, + 261, + 641 + ], + "spans": [ + { + "bbox": [ + 56, + 630, + 261, + 641 + ], + "type": "text", + "content": "[59] OpenAI. Introducing chatgpt. OpenAI Blog, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 642, + 225, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 642, + 225, + 652 + ], + "spans": [ + { + "bbox": [ + 56, + 642, + 225, + 652 + ], + "type": "text", + "content": "[60] OpenAI. Gpt-4 technical report, 2023. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 654, + 248, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 654, + 248, + 665 + ], + "spans": [ + { + "bbox": [ + 56, + 654, + 248, + 665 + ], + "type": "text", + "content": "[61] OpenAI. 
Gpt-4v(ision) system card, 2023. 1, 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 666, + 294, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 666, + 294, + 731 + ], + "spans": [ + { + "bbox": [ + 56, + 666, + 294, + 731 + ], + "type": "text", + "content": "[62] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 1, 2, 3, 6, 8" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 91, + 553, + 731 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 316, + 91, + 553, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 91, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 316, + 91, + 553, + 156 + ], + "type": "text", + "content": "[63] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. LAION-5B: An open large-scale dataset for training next generation image-text models. In NeurlPS, 2022. 3, 4, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 158, + 553, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 158, + 553, + 211 + ], + "spans": [ + { + "bbox": [ + 316, + 158, + 553, + 211 + ], + "type": "text", + "content": "[64] Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatronlm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019. 
5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 213, + 553, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 213, + 553, + 267 + ], + "spans": [ + { + "bbox": [ + 316, + 213, + 553, + 267 + ], + "type": "text", + "content": "[65] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 5, 6, 8, 9, 10" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 269, + 553, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 269, + 553, + 334 + ], + "spans": [ + { + "bbox": [ + 316, + 269, + 553, + 334 + ], + "type": "text", + "content": "[66] Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama. https://cerebras.ai/blog/slimpajama-a-627b-token-cleaned-and-deduplicated-version-of-redpajama, 2023.4.5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 335, + 553, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 335, + 553, + 366 + ], + "spans": [ + { + "bbox": [ + 316, + 335, + 553, + 366 + ], + "type": "text", + "content": "[67] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 2, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 368, + 553, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 368, + 553, + 422 + ], + "spans": [ + { + "bbox": [ + 316, + 368, + 553, + 422 + ], + "type": "text", + "content": "[68] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. 
arXiv preprint arXiv:2312.11805, 2023. 1, 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 423, + 553, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 423, + 553, + 488 + ], + "spans": [ + { + "bbox": [ + 316, + 423, + 553, + 488 + ], + "type": "text", + "content": "[69] Peter Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Adithya Jairam Vedagiri IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. Advances in Neural Information Processing Systems, 37:87310-87356, 2024. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 490, + 553, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 490, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 316, + 490, + 553, + 544 + ], + "type": "text", + "content": "[70] Shengbang Tong, Zhuang Liu, Yuexiang Zhai, Yi Ma, Yann LeCun, and Saining Xie. Eyes wide shut? exploring the visual shortcomings of multimodal llms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9568-9578, 2024. 9" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 544, + 553, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 544, + 553, + 587 + ], + "spans": [ + { + "bbox": [ + 316, + 544, + 553, + 587 + ], + "type": "text", + "content": "[71] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 
3, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 589, + 553, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 589, + 553, + 632 + ], + "spans": [ + { + "bbox": [ + 316, + 589, + 553, + 632 + ], + "type": "text", + "content": "[72] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. Advances in Neural Information Processing Systems, 36:46830-46855, 2023. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 633, + 553, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 633, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 316, + 633, + 553, + 677 + ], + "type": "text", + "content": "[73] Haochen Wang, Junsong Fan, Yuxi Wang, Kaiyou Song, Tiancai Wang, Xiangyu Zhang, and Zhaoxiang Zhang. Bootstrap masked visual modeling via hard patches mining. arXiv preprint arXiv:2312.13714, 2023. 5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 677, + 553, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 677, + 553, + 731 + ], + "spans": [ + { + "bbox": [ + 316, + 677, + 553, + 731 + ], + "type": "text", + "content": "[74] Haochen Wang, Kaiyou Song, Junsong Fan, Yuxi Wang, Jin Xie, and Zhaoxiang Zhang. Hard patches mining for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10375-10385, 2023. 
5" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 91, + 296, + 135 + ], + "spans": [ + { + "bbox": [ + 56, + 91, + 296, + 135 + ], + "type": "text", + "content": "[75] Haochen Wang, Anlin Zheng, Yucheng Zhao, Tiancai Wang, Ge Zheng, Xiangyu Zhang, and Zhaoxiang Zhang. Reconstructive visual instruction tuning. In International Conference on Learning Representations, 2025. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 137, + 296, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 137, + 296, + 203 + ], + "spans": [ + { + "bbox": [ + 56, + 137, + 296, + 203 + ], + "type": "text", + "content": "[76] Jiacong Wang, Bohong Wu, Haiyong Jiang, Zhou Xun, Xin Xiao, Haoyuan Guo, and Jun Xiao. World to code: Multimodal data generation via self-instructed compositional captioning and filtering. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4608-4623, 2024. 
1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 204, + 296, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 204, + 296, + 270 + ], + "spans": [ + { + "bbox": [ + 56, + 204, + 296, + 270 + ], + "type": "text", + "content": "[77] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 1, 2, 3, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 271, + 296, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 271, + 296, + 304 + ], + "spans": [ + { + "bbox": [ + 56, + 271, + 296, + 304 + ], + "type": "text", + "content": "[78] Wenxuan Wang, Quan Sun, Fan Zhang, Yepeng Tang, Jing Liu, and Xinlong Wang. Diffusion feedback helps clip see better. arXiv preprint arXiv:2407.20171, 2024. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 304, + 296, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 304, + 296, + 358 + ], + "spans": [ + { + "bbox": [ + 56, + 304, + 296, + 358 + ], + "type": "text", + "content": "[79] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 2, 3, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 360, + 296, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 360, + 296, + 405 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 296, + 405 + ], + "type": "text", + "content": "[80] Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. SimVLM: Simple visual language model pretraining with weak supervision. 
In International Conference on Learning Representations, 2022. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 406, + 296, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 406, + 296, + 427 + ], + "spans": [ + { + "bbox": [ + 56, + 406, + 296, + 427 + ], + "type": "text", + "content": "[81] X.ai. Grok-1.5 vision preview. https://x.ai/blog/grok-1.5v, 2024.5, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 429, + 296, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 429, + 296, + 473 + ], + "spans": [ + { + "bbox": [ + 56, + 429, + 296, + 473 + ], + "type": "text", + "content": "[82] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 5, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 474, + 296, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 474, + 296, + 517 + ], + "spans": [ + { + "bbox": [ + 56, + 474, + 296, + 517 + ], + "type": "text", + "content": "[83] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 518, + 296, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 518, + 296, + 573 + ], + "spans": [ + { + "bbox": [ + 56, + 518, + 296, + 573 + ], + "type": "text", + "content": "[84] Qiying Yu, Quan Sun, Xiaosong Zhang, Yufeng Cui, Fan Zhang, Yue Cao, Xinlong Wang, and Jingjing Liu. Capsfusion: Rethinking image-text data at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14022-14032, 2024. 
4, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 574, + 296, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 574, + 296, + 619 + ], + "spans": [ + { + "bbox": [ + 56, + 574, + 296, + 619 + ], + "type": "text", + "content": "[85] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 5, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 620, + 296, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 620, + 296, + 664 + ], + "spans": [ + { + "bbox": [ + 56, + 620, + 296, + 664 + ], + "type": "text", + "content": "[86] Mert Yuksekgonul, Federico Bianchi, Pratyusha Kalluri, Dan Jurafsky, and James Zou. When and why vision-language models behave like bags-of-words, and what to do about it? arXiv preprint arXiv:2210.01936, 2022. 5, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 665, + 296, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 665, + 296, + 708 + ], + "spans": [ + { + "bbox": [ + 56, + 665, + 296, + 708 + ], + "type": "text", + "content": "[87] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 
3, 7, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 709, + 296, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 709, + 296, + 732 + ], + "spans": [ + { + "bbox": [ + 56, + 709, + 296, + 732 + ], + "type": "text", + "content": "[88] Tao Zhang, Xiangtai Li, Zilong Huang, Yanwei Li, Weixian Lei, Xueqing Deng, Shihao Chen, Shunping Ji, and Jiashi" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 91, + 553, + 281 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 333, + 91, + 553, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 91, + 553, + 114 + ], + "spans": [ + { + "bbox": [ + 333, + 91, + 553, + 114 + ], + "type": "text", + "content": "Feng. Pixel-sail: Single transformer for pixel-grounded understanding. arXiv, 2025. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 114, + 553, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 114, + 553, + 179 + ], + "spans": [ + { + "bbox": [ + 316, + 114, + 553, + 179 + ], + "type": "text", + "content": "[89] Youcai Zhang, Xinyu Huang, Jinyu Ma, Zhaoyang Li, Zhaochuan Luo, Yanchun Xie, Yuzhuo Qin, Tong Luo, Yaqian Li, Shilong Liu, et al. Recognize anything: A strong image tagging model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1724-1732, 2024. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 181, + 553, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 181, + 553, + 236 + ], + "spans": [ + { + "bbox": [ + 316, + 181, + 553, + 236 + ], + "type": "text", + "content": "[90] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 
5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 237, + 553, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 237, + 553, + 281 + ], + "spans": [ + { + "bbox": [ + 316, + 237, + 553, + 281 + ], + "type": "text", + "content": "[91] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 1, 2, 3" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_content_list.json b/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..206b27224b0894c5235561ca5e2ec86d28d36305 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_content_list.json @@ -0,0 +1,2470 @@ +[ + { + "type": "text", + "text": "Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding", + "text_level": 1, + "bbox": [ + 153, + 130, + 841, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tao Zhang $^{1,2}$ Xiangtai Li $^{1}$ Zilong Huang $^{1}$ Yanwei Li $^{1}$ Weixian Lei $^{1}$ Xueqing Deng $^{1}$ Shihao Chen $^{2}$ Shunping Ji $^{2}$ Jiashi Feng $^{1}$ Bytedance Seed $^{2}$ WHU", + "bbox": [ + 147, + 178, + 833, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project Page: 
https://zhang-tao-whu.github.io/project/pixelsail", + "bbox": [ + 218, + 234, + 795, + 252 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d6b641908f41c0263a174252592424901ed71c8c03ba38b6916ca8c12726c9d7.jpg", + "image_caption": [ + "(a), Multi-modal Fusion with extra decoders" + ], + "image_footnote": [], + "bbox": [ + 93, + 282, + 377, + 422 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2ad3d40deefda5121871be7727625f1a4a15c5424e7c69bfaeec2b5381446be6.jpg", + "image_caption": [ + "(b), MLLM with segmentation experts", + "Figure 1. Comparison of current MLLMs for pixel-wise understanding with our method. (a) and (b). Current MLLMs for pixel-wise understanding feature highly complex system architectures, including an LLM, a CLIP-like vision backbone, an object token extraction model, a segmentation vision backbone, and a SAM-like decoder. (c). Our method employs only a single transformer." + ], + "image_footnote": [], + "bbox": [ + 408, + 268, + 647, + 436 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/efec56ff4c133b921837f8320aa383f2ce9a78bb129bdb23c2b4f1b278cce84e.jpg", + "image_caption": [ + "(c), Pixel SAIL with one single transformer" + ], + "image_footnote": [], + "bbox": [ + 686, + 284, + 856, + 421 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 521, + 326, + 536 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multimodal Large Language Models (MLLMs) achieve remarkable performance for fine-grained pixel-level understanding tasks. However, all the works rely heavily on extra components, such as vision encoder (CLIP), segmentation experts, leading to high system complexity and limiting model scaling. In this work, our goal is to explore a highly simplified MLLM without introducing extra components. 
Our work is motivated by the recent works on Single trTransformer as a unified vVision-Language Model (SAIL) design, where these works jointly learn vision tokens and text tokens in transformers. We present Pixel-SAIL, a single transformer for pixel-wise MLLM tasks. In particular, we present three technical improvements on the plain baseline. First, we design a learnable upsampling module to refine visual token features. Secondly, we propose a novel visual prompt injection strategy to enable the single transformer to understand visual prompt inputs and benefit from the early fusion of visual prompt embeddings and vision tokens. Thirdly, we introduce a vision expert distillation strategy to efficiently enhance the single transformer's fine-grained feature extraction capability. In addition, we have collected a comprehensive pixel understanding benchmark (PerBench), using a manual check. It includes three tasks:", + "bbox": [ + 88, + 554, + 483, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "detailed object description, visual prompt-based question answering, and visual-text referring segmentation. Extensive experiments on four referring segmentation benchmarks, one visual prompt benchmark, and our PerBench show that our Pixel-SAIL achieves comparable or even better results with a much simpler pipeline. Code and model will be released at https://github.com/magicresearch/Sa2VA.", + "bbox": [ + 511, + 522, + 906, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 513, + 650, + 643, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multi-modal Large Language Models (MLLMs) have garnered significant research efforts, driven by advancements of Large Language Models (LLMs) [22, 56, 65]. While most studies focus on open-ended visual question answering tasks, there is a growing interest [51, 80] in fine-grained, pixel-level understanding. 
This enables broader applications, such as facilitating precise region-level editing and generation and achieving precise understanding of designated mask regions.", + "bbox": [ + 509, + 675, + 906, + 811 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent pixel-wise MLLMs [27, 51, 54, 63, 72, 80, 81] mainly adopt visual and language fusion frameworks, following design patterns [17, 42, 68] established before the LLM era. For example, LAVIT [68] adopts encoder-fusion approach, injecting language embedding (generated by BERT [13]) into vision transformers. With the advent of", + "bbox": [ + 511, + 811, + 908, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "ByteDance | Seed", + "bbox": [ + 89, + 88, + 318, + 109 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10465v1 [cs.CV] 14 Apr 2025", + "bbox": [ + 22, + 261, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LLMs [22, 65, 66], recent works [27, 54, 72, 80] integrate state-of-the-art segmentation models [26, 33, 53], for pixel-level understanding, by either appending them to LLM outputs or embedding LLM within segmentation pipelines. While effective, the overall architectures are complex, requiring specialized components such as vision-language fusion modules and additional decoders. Moreover, their final performance often heavily depends on either MLLMs or the segmentation models, which may lead to suboptimal results due to limitations within individual submodules.", + "bbox": [ + 89, + 113, + 480, + 263 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we explore a novel, simple yet effective pixel-wise MLLM design, drawing inspiration from recent advancements in SAIL architecture, which is also called Encoder-free MLLMs. 
These methods drop the extra vision encoder and jointly co-train vision and language tokens on large scale datasets, with a simpler design. Moreover, they show competitive performance on image-level VQA tasks, compared with LLaVA. Motivated by this success, we extend the framework to pixel-level understanding tasks, aiming to reduce the complexity of existing approaches. To the best of our knowledge, this is the first study to explore the simplest architecture for pixel-wise MLLM tasks, including referring segmentation and visual prompt understanding.", + "bbox": [ + 89, + 267, + 480, + 464 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We first directly extend SAIL architecture by adding segmentation token and visual prompt tokens to generate segmentation masks and output region caption, following previous works [27, 51, 74]. However, this leads to inferior results on both segmentation and visual prompt understanding. Several reasons are: (1), The misalignments on high resolution features since there are no segmentation decoders since SAIL directly reshape the vision tokens into features. (2), Previous works directly adopt mask pooling on high level visual tokens where SAIL baseline only maps RGB inputs with one projection layer, where most tokens are low level features. (3), The mask quality is low since no segmentation experts are involved.", + "bbox": [ + 89, + 465, + 480, + 662 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To solve these problems, we present three simple technical improvements, which lead to our Pixel-SAIL framework. First, we design a simple learnable up-sampling module to refine the low resolution visual tokens in high resolution features. Our goal is to keep the design as simple as possible, where only one transposed 2D convolution is involved. 
Then, for visual prompt understanding, we design a novel visual prompt injection method, where we map the visual prompts into special text tokens without introducing extra visual prompt encoder in the middle stage of SAIL. Next, we propose to distill the previous segmentation experts into SAIL to improve mask quality. All the improvements are plug-in-play, and we verify the effectiveness on various SAIL architectures, including SOLO [8] and EVEv2 [16].", + "bbox": [ + 89, + 665, + 480, + 891 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Then, to further indicate the effectiveness of our Pixel-SAIL and facilitate the development of pixel-LLM com", + "bbox": [ + 89, + 893, + 480, + 922 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "munity, we further design a new challenging benchmark, PerBench. Compared with previous pixel-wise MLLM benchmarks, we have three innovative and challenging features. First, we include a detailed object caption where most existing benchmarks only contain short captions without fine-gained contents. Secondly, we re-evaluate visual-prompt understanding as multi-choice VQA tasks following MME [20] and MMBench [43] to achieve more accurate region caption evaluation. Thirdly, we introduce a task by segmenting objects jointly referenced by visual prompts and text. Our benchmark reveals the limitation of current state-of-the-art pixel-wise MLLM on fine-grained understanding and mixed referring tasks.", + "bbox": [ + 511, + 114, + 903, + 310 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Pixel-SAIL is jointly co-trained with mixed data engine on referring segmentation datasets, VQA datasets, and visual prompt datasets. Experimental results show that our method can achieve better results on five pixel-wise benchmarks. In particular, on RefCOCOg and RefCOCO+ datasets, our method with 3B size can outperform previous pixel MLLMs, including GLaMM (7B) and OMG-LLaVA (7B), by $1.5 - 3.0\\%$ with a simpler pipeline. 
On our Per-Bench, our method achieves 24.2 METEOR, $74\\%$ accuracy, 33.4 cIoU and 42.2 overall score, surpassing the SOTA MLLMs GLaMM (7B) and Sa2VA (4B) with overall scores of 26.9 and 3.2, respectively.", + "bbox": [ + 511, + 311, + 903, + 492 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 506, + 653, + 522 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Vision Language Models. Staring from CLIP [50] and ALIGN [24], modern vision language models have adopted contrastive learning on large-scale image-text datasets for learning vision-text aligned representations. The trained models are also proven to work well on open-vocabulary perception, such as segmentation [45, 71, 78, 79] and detection [21, 58, 61, 75]. The following works [31, 32, 64, 76] share the same network design, exploring modified loss functions and targeting data quality and filtering. Then, with the rise of large language models [5, 22, 56, 65], recent works [1, 10, 11, 40, 55, 77] mainly focus on multimodal large language models for open-ended settings, such as visual question answering or OCR benchmarks. On representative work, LLaVA [40], uses the CLIP to encode images into visual tokens and sends the visual tokens to LLMs. After that, the following works [1, 30, 41] improve designs with scaled high quality datasets, images, and videos constraining. Meanwhile, several recent works [8, 14, 16, 46] also explore the visual encoder-free designs, which jointly learn the image and text representation in a single transformer architecture. For example, SOLO [8] collects mixed language and vision datasets and trains one transformer for VQA tasks, while EVE [14] designs a CLIP supervision to enhance visual token learning. 
Our work follows the visual encoder-free design, and we go a step further by exploring pixel-grounded understanding tasks, including ground", + "bbox": [ + 511, + 531, + 903, + 924 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ing tasks and visual prompt understanding. To our knowledge, we are the first to apply encoder-free architecture for pixel-grounded understanding tasks.", + "bbox": [ + 89, + 114, + 480, + 159 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Referring Expression Segmentation. This task outputs specific masks driven by text description. Earlier works [19, 23, 36, 39, 67] explore various fusion architecture and modules to enhance text and vision feature alignments. Equipped with LLMs, several recent advanced works [27, 48, 49, 51, 63, 72, 73, 80, 82] propose more complex referring tasks, including reasoning referring or joint mask and caption generation. In particular, LISA [27] involves complex expression while GLaMM [51] annotates a new dataset and proposes region-level caption and segmentation tasks. However, all these works contain complex designs: extra vision encoders, segmentation encoders, mask decoders, and prompt encoders. Our method, Pixel-SAIL, only has one transformer to jointly learn the joint visual and language feature. With proposed data engine and improved methods, Pixel-SAIL achieves good results with much simpler architecture.", + "bbox": [ + 89, + 161, + 482, + 416 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Visual Prompt Understanding. Understanding visual prompts plays an important role when building interaction between VLMs and human. Recent works [4, 38, 47, 51, 74] build new visual prompt datasets for region caption generation and prompt-aware VQA tasks. 
ViP-LLaVA [4] overlays the visual prompts directly onto the image canvas and fine-tunes the LLaV on a specific visual prompt dataset, while Osprey [74] explores pixel-wise mask regions into language instructions. Our method can also be extended into visual prompt understanding with our proposed prompt token injection design.", + "bbox": [ + 89, + 417, + 482, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 89, + 602, + 181, + 617 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Encoder Free MLLM and Plain Baseline", + "text_level": 1, + "bbox": [ + 89, + 627, + 439, + 643 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, several encoder-free MLLMs [8, 15, 16, 46] achieve comparable performance with those extra vision encoders. These models jointly learn vision and text features in a single transformer, with much simpler architecture. In particular, SOLO uses a simple project layer to map the image into visual tokens and then combines language tokens as the inputs of the transformer. However, no works have explored such new architecture for fine-grained vision language tasks (region caption, referring masks).", + "bbox": [ + 89, + 650, + 482, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Plain Baseline. To fill this gap, we first construct a plain single transformer baseline, motivated by the previous ViT-based MLLMs [27, 72]. We start it with a pre-trained encoder-free MLLM. For segmentation tasks, we modify previous mask generation methods into the single transformer. First, we reshape the hidden states of the last transformer layer of vision tokens $\\mathcal{V} \\in \\mathbb{R}^{N \\times C}$ into image features $\\mathcal{F} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}$ . 
$N$ represents the number of vision tokens, $C$ denotes the channel size, $H$ and $W$ indicate", + "bbox": [ + 89, + 787, + 482, + 924 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the height and width of the image, $S$ stands for the down-sampling stride. Then, the image features are then crossmultiplied with the hidden states of the predicted segmentation token $\\mathcal{Q} \\in \\mathbb{R}^{K \\times C}$ to generate the segmentation masks $\\mathcal{M} \\in \\mathbb{R}^{K \\times \\frac{H}{S} \\times \\frac{W}{S}}$ . $K$ signifies the number of predicted segmentation tokens, following previous works [27, 51]. For visual prompt understanding, we employ a pooling-based method [74] to derive object representations $\\mathcal{O} \\in \\mathbb{R}^{M \\times C}$ from image patch embeddings $\\mathcal{P} \\in \\mathbb{R}^{\\frac{H}{P} \\times \\frac{W}{P} \\times C}$ . These object embeddings are fed into the single transformer to represent the corresponding objects. $M$ represents the number of visual prompts, and $P$ denotes the patch size. For segmentation tasks, we adopt extra mask loss. Otherwise, we adopt the same text loss for VQA tasks and visual prompt understanding tasks.", + "bbox": [ + 511, + 114, + 903, + 340 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Limitation. The plain baseline demonstrates a certain level of pixel-text alignment capability since both segmentation token and visual prompt token are jointly learned with vision and language tokens. However, the plain baseline exhibits several significant shortcomings: 1) The segmentation mask quality is poor due to the large feature down-sampling stride (16 or 32), even when using simple pixel shuffle or bilinear interpolation for up-sampling. 
2) The single transformer struggles to comprehend the referential target of object representation, as the object representation is summarized from image patch embeddings with poor semantic information.", + "bbox": [ + 511, + 342, + 906, + 523 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Pixel-SAIL Method", + "text_level": 1, + "bbox": [ + 511, + 537, + 700, + 551 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the substantial shortcomings, the performance of plain baseline in fine-grained pixel understanding tasks falls significantly, compared to vision-expert competitors (Sec.4). To solve these challenges, we have implemented three key enhancements to the baseline architecture. First, we integrate a learnable up-sampling module to fully exploit the segmentation capabilities of the single transformer architecture. Second, we develop an innovative visual prompt injection mechanism that facilitates effective interpretation of visual prompt inputs. Our method enables early-stage fusion between vision tokens and visual prompt embeddings. Finally, we introduce a dense feature distillation strategy that significantly improves the model's capacity for extracting fine-grained visual features. These improvements collectively address the shortcomings of the plain baseline while maintaining its architectural simplicity.", + "bbox": [ + 511, + 560, + 903, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Learnable Up-sampling Module. Inspired by [35], we also incorporate a simple learnable up-sampling model $\\mathcal{U}$ to generate the high-resolution features $F_{h} \\in \\mathbb{R}^{\\frac{H}{4} \\times \\frac{W}{4} \\times C}$ essential for pixel-level grounding. The up-sampling module comprises multiple up-sampling blocks, each consisting of a transposed 2D convolution followed by a depth-wise convolution. 
It effectively upscales the low-resolution features $F_{l} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}$ , derived from resized vision tokens,", + "bbox": [ + 511, + 803, + 903, + 924 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 946, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4d7c01c3de069a344975deff39094f9b32939af00d3ab7023456d2e4efc96069.jpg", + "image_caption": [ + "Figure 2. The architecture of our proposed plain baseline and Pixel-SAIL. Pixel-SAIL is as simple and elegant as the plain baseline but demonstrates significantly improved performance. The examples on the right demonstrate that Pixel-SAIL possesses the capability for general conversation and comprehensive pixel-grounded understanding." + ], + "image_footnote": [], + "bbox": [ + 124, + 111, + 872, + 321 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "to one-quarter of the original resolution.", + "bbox": [ + 89, + 393, + 356, + 407 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Visual Prompt Injection. Previous works [51, 72, 74] summarize the referenced object features via pooling on vision tokens from ViT encoder. However, there are no such visual tokens for encoder-free MLLMs. Thus, the inherent semantic deficiency hinders the single transformer's ability to precisely identify referenced objects based solely on feature summaries derived from patch embeddings, where most are low-level cues, such as edges.", + "bbox": [ + 89, + 411, + 483, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To overcome this limitation, we propose an innovative visual prompt injection mechanism. Our approach integrates multiple visual prompt special tokens $\\{VP_{i}|i\\in [1,N]\\}$ into the large language model's vocabulary. 
These tokens' text embeddings $\\mathcal{V}\\mathcal{P}^t\\in \\mathbb{R}^{N\\times C}$ are used to fill mask-based visual prompts $\\mathcal{M}^{vp}\\in \\mathbb{R}^{N\\times \\frac{H}{P}\\times \\frac{W}{P}}$ , thereby creating visual prompt tokens $\\mathcal{V}\\mathcal{P}\\in \\mathbb{R}^{\\frac{HW}{P^2}\\times C}$ . The vision tokens $\\mathcal{V}\\in \\mathbb{R}^{\\frac{HW}{P^2}\\times C}$ are first added with these visual prompt tokens $\\mathcal{V}\\mathcal{P}$ before being processed by the single transformer. This enhancement enables the model to accurately identify referenced objects by leveraging the corresponding special tokens $\\{VP_{i}|i\\in [1,N]\\}$ within the text instructions.", + "bbox": [ + 89, + 537, + 483, + 737 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Dense Feature Distillation. Due to the lack of large-scale, high-quality segmentation data like SA-1B [26], the method produces poor-quality masks, particularly at object boundaries. However, directly training on large-scale segmentation datasets would be costly and damage the original instruction following capabilities. To address both, we employ pre-trained segmentation experts to distill the single transformer, ensuring optimization of object details without hurting VQA capabilities. We perform distillation by leveraging mask features generated by Mask2Former's [12] pixel decoder on the upsampled mask features $F_{h} \\in \\mathbb{R}_{\\frac{H}{4}}^{\\frac{H}{4} \\times \\frac{W}{4} \\times C}$ and utilizing features produced by SAM2's [53] encoder", + "bbox": [ + 89, + 742, + 483, + 924 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fbef5dad172896c2b1b5166bd4ab0c83cd1af418d9f92e034ba690b473c1a42b.jpg", + "image_caption": [ + "Figure 3. Visual examples on our PerBench. Best view it in color and zoom in." 
+ ], + "image_footnote": [], + "bbox": [ + 516, + 390, + 903, + 674 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "on the low-resolution features $F_{l} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}$ . This simple distillation strategy improves segmentation quality with only a negligible increase in training time.", + "bbox": [ + 511, + 719, + 906, + 768 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Benchmark and Dataset Engine", + "text_level": 1, + "bbox": [ + 511, + 779, + 794, + 796 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our Benchmark: PerBench. We further manually annotate a benchmark named PerBench (Pixel-grounded Understanding Benchmark). PerBench aims to address three aspects lacking in existing pixel grounding benchmarks.", + "bbox": [ + 511, + 801, + 905, + 876 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The first aspect is detailed object caption. Previous works [6, 34] have emphasized more detailed image captions, demonstrating that comprehensive captions signifi", + "bbox": [ + 511, + 878, + 905, + 924 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "cantly enhance model performance. However, current object caption datasets such as Osprey-724k [74] and evaluation benchmarks like Refcocog provide only cursory object captions. To address this limitation, we leverage SOTA models InternVL2.5-78B [11] and Qwen2.5VL-72B [2] to generate detailed object captions. These detailed object captions are then meticulously screened and refined through manual review, ultimately yielding 500 precise, nuanced object captions to serve as a robust evaluation benchmark. 
METEOR [3] serves as the evaluation metric for the detailed object caption task.", + "bbox": [ + 88, + 114, + 480, + 281 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The second aspect is the assessment of visual-prompt understanding ability in multiple-choice format. Although captioning tasks can accurately reflect a model's visual prompt understanding ability, precise and fair evaluation is difficult. Rule-based metrics such as CIDEr [57] and METEOR [3] are affected by response length, format, and ground-truth quality, while using models as evaluators inevitably introduces model bias. Therefore, a fair and quantitative visual-prompt understanding benchmark is necessary. Inspired by MMBench [43] and MME [20], we manually annotated 500 multiple-choice questions based on detailed object captions, covering the examination of models' understanding of referenced objects' appearance, attributes, uses, and relationships with surrounding objects. MLLMs need to perceive the attributes of referenced objects accurately and have instruction-following ability to select the appropriate choice correctly. Accuracy is selected as the evaluation metric for the visual prompt-based multiple-choice questions.", + "bbox": [ + 93, + 282, + 483, + 568 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The third aspect is segmenting objects jointly referenced by visual prompts and text, abbreviated as V-T RES. It aims to test the model's ability to understand objects indicated by user-input visual prompts and segment associated objects according to text instructions. This task comprehensively assesses the MLLM's pixel-grounded understanding ability, requiring the model to possess precise visual prompt understanding capabilities, text reasoning abilities, and pixel grounding skills. We also manually annotate 500 V-T RES samples, which five expert annotators double-check. 
Similar with RefCOCO series datasets, we select cIoU and gIoU as the evaluation metric for V-T RES task. The overall score of PerBench is the average of the normalized scores (0-100) from the above three tasks.", + "bbox": [ + 89, + 571, + 482, + 781 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our benchmark can be used to evaluate pixel-wise MLLMs and point out more challenging directions for detailed object understanding, joint visual prompts, and text understanding to the current community.", + "bbox": [ + 89, + 784, + 482, + 845 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset Engine. To fully unleash the potential of the single transformer, we collect diverse pixel-grounded data, including segmentation datasets and visual-prompt understanding datasets, following previous works [16, 46].", + "bbox": [ + 89, + 845, + 482, + 906 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For segmentation-related data, we first use Ref-", + "bbox": [ + 109, + 909, + 480, + 922 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "COCO+/g [25, 70] and COCO [37] semantic segmentation data used in LISA [27], the Grandf dataset (214k samples) used in GLaMM [51], and MUSE data (246k samples) used in PixelLM [54]. We also use recent Pixel2Cap [69] data (comprising 20k images) and organized it into the referring segmentation format. Finally, we further add COCO [37] panoptic segmentation data and structured it as: \"Question: Please segment the {class name} in instance mode. Answer: {class name}-1 [SEG], ..., {class name}-n [SEG].\"", + "bbox": [ + 511, + 114, + 903, + 251 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For visual prompt understanding, we employ two public datasets: Osprey-724k [74] and Pixel2Cap [69]. Additionally, we reformat the COCO dataset into a question-answer structure specifically designed to query object categories. 
To enhance the model's capability for fine-grained object description, we prompt the InternVL2.5-78B [11] model to generate approximately 300k detailed object captions derived from 10k SA-1B [26] images. Lastly, to maintain the instruction following ability, we also integrate the LLaVA1.5 [40] 665k dataset into our training data.", + "bbox": [ + 511, + 251, + 903, + 400 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training. We combine all the aforementioned data for cotraining. The loss function consists of the next token prediction loss $\\mathcal{L}_{ntp}$ , the segmentation loss $\\mathcal{L}_{seg}$ , and the distillation loss $\\mathcal{L}_{distill}$ :", + "bbox": [ + 511, + 401, + 903, + 462 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {n t p} + \\mathcal {L} _ {s e g} + \\alpha \\mathcal {L} _ {\\text {d i s t i l l}}, \\quad \\mathcal {L} _ {s e g} = \\lambda \\mathcal {L} _ {c e} + \\beta \\mathcal {L} _ {\\text {s e g}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 469, + 903, + 486 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha$ is set to 0.5, $\\lambda$ to 2.0 and $\\beta$ to 0.5.", + "bbox": [ + 511, + 493, + 797, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 511, + 521, + 638, + 537 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. We extensively evaluate our meta-architecture using two open-source encoder-free multimodal large language models: SOLO [8] and EVEv2 [16]. For SOLO, following [28], we modify the attention mechanism between vision tokens from causal attention to full attention and conduct supervised fine-tuning on the LLaVA1.5 665k dataset. For SOLO, we modify the attention mechanism between vision tokens from causal attention to full attention and replace the LLM with Qwen2.5 [66] 0.5B and 3B, respectively. 
For EVEv2, we retain its original architecture and weights without any modifications. We build Pixel-SAIL 0.5B and 3B based on our modified SOLO baseline, and 7B on EVEv2. When training Pixel-SAIL based on SOLO, we maintain the original resolution of input images. For images with a long side exceeding 1024, we preserve the aspect ratio and resize the long side to 1024. When training Pixel-SAIL based on EVEv2, we resize the images to the closest to $800^2$ pixels to reduce training costs, which differs from the original setting of $1600^2$ . The training process is conducted on 32 A100 (80GB) GPUs using the AdamW [44] optimizer with a cosine decay learning rate scheduler. We set the initial learning rate to 4e-5, the warm-up ratio to 0.03, and the batch size to 256. The training duration for the 0.5B and 3B models is 12 hours and 24 hours, respectively.", + "bbox": [ + 511, + 546, + 906, + 924 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2467c73883bd26062ac816e1e140539ec76f0e812af0524f67af05492787cfd0.jpg", + "table_caption": [ + "Table 1. Performance on referring segmentation benchmarks. The evaluation metric is cIoU. \"ft\" denotes fine-tuning on the specific dataset." + ], + "table_footnote": [], + "table_body": "
MethodLLM SizeRefCOCO+RefCOCOgRefCOCOgRefCOCO
valtestAtestBval(U)test(U)valtestAtestBvaltestAtestB
Referring Segmentation Specialist Without MLLM
VLT [17]-56.361.050.155.057.767.570.565.252.562.250.5
CRIS [59]-62.368.153.759.960.470.573.266.155.363.851.0
LAVT [68]-62.168.455.161.262.172.775.868.857.665.355.0
PolyFormer-L [42]-69.374.661.969.270.276.078.373.3---
ReLA [39]-66.071.057.765.066.073.876.570.256.459.058.4
MLLMs With Vision Expert
LISA (ft) [27]7B65.170.858.167.970.674.979.172.3---
PixelLM [54]7B66.371.758.369.370.573.076.568.2---
GSVA (ft) [63]7B64.567.758.671.172.076.477.472.861.769.260.3
GroundHog [81]7B70.575.064.974.174.678.579.975.766.7--
GlaMM (ft) [51]7B72.678.764.674.274.979.583.276.9---
SAM4MLLM [9]7B73.577.865.874.575.679.682.876.166.370.163.2
LaSagnA [60]7B66.470.660.170.671.976.878.773.838.150.442.1
OMG-LLaVA (ft) [80]7B69.173.163.072.972.978.080.374.1---
F-LLM [62]7B65.875.258.570.171.775.879.572.4---
Sa2VA [72]4B74.3--76.7-80.4-----
MLLMs Without Vision Expert
Pixel-SAIL0.5B70.875.865.475.476.777.980.575.963.971.563.6
Pixel-SAIL (ft)0.5B73.077.068.075.676.179.181.777.068.074.066.8
Pixel-SAIL3B75.779.772.078.780.480.882.679.067.774.667.1
Pixel-SAIL (ft)3B76.279.771.278.579.481.883.478.872.177.170.4
", + "bbox": [ + 148, + 142, + 844, + 478 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/be2b66fed29fc8e20e40409c20d73777c16880d53864ef80c8dc867c5ad40a35.jpg", + "table_caption": [ + "Table 2. Region caption performance on RefCOCOg dataset." + ], + "table_footnote": [], + "table_body": "
Method SizePixel-SAIL 0.5BPixel-SAIL 3BSa2VA 4BOMG-LLaVA 7BOsprey 7BGLaMM 7B
METEOR16.017.617.315.316.616.2
", + "bbox": [ + 99, + 508, + 470, + 550 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/174cae71eb7b8d0f28c535d16a8c49f3cb485fdc7e42599eb5170af5aef711ea.jpg", + "table_caption": [ + "Table 3. The performance on our PerBench. Due to the lack of visual prompt understanding capability, LISA scores 0 on all tasks." + ], + "table_footnote": [], + "table_body": "
ModelSizeDetailed Caption METEORMCQ AccV-T RESOverall Score
cIoUgIoU
LISA [27]7B00000
Osprey [74]7B13.40.12008.5
GLaMM [51]7B12.60.1424.314.615.3
Sa2VA [72]4B19.20.7131.921.939.0
Pixel-SAIL0.5B21.40.6929.719.838.4
Pixel-SAIL3B24.20.7433.423.542.2
", + "bbox": [ + 99, + 599, + 470, + 703 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b98e83ba9dea39282a0c2ade0a8b2df0c60b7033fa236a79e7e6f9ddabb69a51.jpg", + "table_caption": [ + "Table 4. Performance on the VQA benchmarks. $\\star$ refers to the use of an $800^{2}$ resolution, which differs from the $1600^{2}$ resolution in the pre-trained model." + ], + "table_footnote": [], + "table_body": "
ModelLLM SizeMMEMMBenchSEEDMMStar
SOLO0.5B523.2/222.513.845.526.2
SOLO3B1155.7/257.553.465.440.3
EVEv2*7B1128.0/240.760.354.244.9
Pixel-SAIL0.5B564.1/150.731.852.226.3
Pixel-SAIL3B1187.3/242.956.366.140.1
Pixel-SAIL*7B1081.0/260.458.964.744.3
", + "bbox": [ + 99, + 753, + 470, + 847 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Setup. For visual prompt understanding and general image QA tasks, we adhere to the same setting as the base MLLM. In the case of segmentation-related tasks, if the model fails to predict a [SEG] token, we compel it", + "bbox": [ + 89, + 863, + 483, + 925 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "to produce a [SEG] token to ensure the generation of the segmentation result.", + "bbox": [ + 511, + 503, + 906, + 532 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Main Results", + "text_level": 1, + "bbox": [ + 511, + 536, + 653, + 551 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results on Referring Segmentation Benchmarks. We compare Pixel-SAIL with other pixel-grounded MLLMs and segmentation specialists on the RefCOCO+ [70], RefCOCOg [70], RefCOCO [25], and gRefCOCO [39] datasets. The comparison results are shown in Tab. 1. Pixel-SAIL 0.5B achieved 70.8, 75.4, and 77.9 cIoU on the validation splits of RefCOCO+, RefCOCOg, and RefCOCO, outperforming all segmentation specialists with comparable model sizes while also maintaining image conversation capabilities. Compared to the classical SAM-based MLLM competitor LISA-7B [27], Pixel-SAIL 0.5B surpassed it by 4.2, 7.9, and 7.8 cIoU on RefCOCO, RefCOCO+, and RefCOCOg respectively, despite having a much smaller model size (0.5B vs. 7B). On the more complex gRefCOCO dataset that includes multi-object segmentation, Pixel-SAIL 0.5B outperformed the carefully designed GSVA-7B [63] by 6.3, 4.8, and 6.5 cIoU on validation, testA, and testB splits respectively.", + "bbox": [ + 509, + 559, + 906, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "When scaling the model to 3B, Pixel-SAIL achieved 75.7, 78.7, 80.8, and 67.7 cIoU on RefCOCO+, RefCOCOg, RefCOCO, and gRefCOCO datasets respectively, surpassing all larger-sized (7B) MLLMs assisted with vision experts. 
Pixel-SAIL-3B even outperformed the SOTA Sa2VA-4B [72] (which uses the powerful InternVL2-", + "bbox": [ + 511, + 832, + 908, + 925 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/cc0595ff87d12b9ef8be15175aa560c9ef16a01b5c5aff4e30984f3ad3cd312f.jpg", + "table_caption": [ + "Table 5. Ablation study on the components of Pixel-SAIL. \"RC\" denotes region caption on RefCOCOg dataset." + ], + "table_footnote": [], + "table_body": "
ModelRefCOCO+/gRC
Plain Baseline64.5/57.3/60.11.0
+ Upsampling69.7/62.5/65.30.9
+ Training Data76.2/69.6/73.81.4
+ VP Injection77.4/70.4/75.216.1
+ Distillation77.9/70.8/75.416.0
", + "bbox": [ + 96, + 159, + 316, + 241 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/455e7389ccd133ebb97f68487917667e4abac0f3041ae3af7a16c8d5e4cc749b.jpg", + "table_caption": [ + "Table 6. Ablation study on Base MLLM. The training data only includes LLaVA-665k and Ref-COCO+/g." + ], + "table_footnote": [], + "table_body": "
MLLMSizeRefCOCO/+/g
SOLO0.5B69.7/62.5/65.3
SOLO3B73.2/66.4/69.1
EVEv27B74.9/68.7/71.3
", + "bbox": [ + 331, + 176, + 521, + 239 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5fafcdafefd607096844ca33bce63f26dc79148a4c06e033e87fdba31edc6979.jpg", + "table_caption": [ + "caption on RefCOCOg dataset." + ], + "table_footnote": [], + "table_body": "
DataRefCOCO+/gRC
Basic Data69.7/62.5/65.3-
+ Seg Data76.2/69.6/73.8-
+ VP Data77.4/70.4/75.216.1
", + "bbox": [ + 529, + 174, + 717, + 232 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/aa0c6a39fab68e9f25a60f16d1d7763c70a4ebb3afdb4147b705e13be16b2639.jpg", + "table_caption": [ + "Table 7. Ablation on the train- Table 8. Ablation study on ing data. \"RC\" denotes region the distillation strategy." + ], + "table_footnote": [], + "table_body": "
DataRefCOCO+/g
w/o Distill77.5/70.5/75.5
M2F77.7/71.0/75.8
SAM277.8/70.9/75.9
Both78.1/70.8/76.1
", + "bbox": [ + 725, + 157, + 898, + 233 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4B [10] and SAM2-L [53]), achieving performance advantages of 1.4 and 2.0 cIoU on the more challenging RefCOCO+ and RefCOCOg datasets respectively.", + "bbox": [ + 89, + 250, + 482, + 295 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on Visual Prompt Understanding Benchmarks. We evaluate the region caption performance on the RefCOCOg dataset, with results shown in Tab. 2. The training dataset of Pixel-SAIL does not include the RefCOCOg region caption dataset, so we directly evaluate its zero-shot performance. Pixel-SAIL-0.5B achieves a METEOR score of 16.0, surpassing OMG-LLaVA 7B by 0.7 points. When scaling the model to 3B, Pixel-SAIL achieves a METEOR score of 17.6, outperforming carefully designed larger models such as Osprey 7B and GLaMM 7B by 1.0 and 1.4 points respectively.", + "bbox": [ + 89, + 297, + 483, + 463 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on PerBench. We have benchmarked several popular pixel-grounded MLLMs on our proposed PerBench, with results shown in Tab. 3. LISA [27] scores 0 points across all tasks due to its inability to understand visual prompt inputs. Osprey [74] demonstrates strong object caption capabilities; however, it achieved only 13.4 METEOR in detailed caption tasks and $12.0\\%$ accuracy in MCQ tasks due to limitations from short object caption lengths in its training data and impaired instruction-following ability. GLaMM [51] and Sa2VA [72] both exhibit comprehensive prompt understanding and segmentation capabilities, though GLaMM's weaker instruction-following ability resulted in only $14.0\\%$ accuracy in MCQ tasks. PixelSAIL-0.5B achieves an overall score of 38.4, comparable to Sa2VA-4B despite Pixel-SAIL having a more powerful base MLLM and segmentation expert. 
Notably, Pixel-SAIL-3B achieves an overall score of 42.2, outperforming Sa2VA-4B across all three tasks.", + "bbox": [ + 89, + 467, + 483, + 738 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on VQA Benchmarks. We compare the visual question answering performance of Pixel-SAIL with the corresponding base MLLMs on the MME [20], MM-Bench [43], SEED [29], and MMStar [7] benchmarks, and the results are presented in Tab. 4. When the model size is 0.5B, Pixel-SAIL demonstrates performance improvements over the base MLLM across all four benchmarks, particularly on MMBench, where the score increased from 13.8 to 31.8. However, when the model size is 3B and 7B, Pixel-SAIL's performance is on par with that of the base MLLMs, which may be constrained by the current quantity (less than 2M) and quality of visual prompts and segmentation data.", + "bbox": [ + 89, + 742, + 483, + 924 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5e29fcd4523ca0d6da3e40f45f1513abc6492464fdbacd397a078097c046b16e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 247, + 591, + 285 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bf21200e51b1a51097e0d3cc0511c985b0448f7206b378f04cc6aff1759b35af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 247, + 643, + 285 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b1cbbc683726f798824e46d98003ddc8d8e08482742e724218e33775a000ce62.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 248, + 754, + 285 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ec4c320c8bb602c970cf6d5f5b650ee874030055777473a9e2cfe4a35408d8f6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 247, + 836, + 286 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8514ef98ba120a98f43055533243a21f0110a475a9d3057fb2ce9a607d8d7ff4.jpg", + "image_caption": [], + "image_footnote": 
[], + "bbox": [ + 836, + 248, + 900, + 286 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4d78f012581df715eb27cab4d9168bf99ced4bfddc463564b571508b9fb66030.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 292, + 671, + 333 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9b7217328723a584f890fb9982f3e03057486311027c5ff17f4e48f7003ad32e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 292, + 823, + 334 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/aa0956d34ee08556b170855ddc7b77d845501068b701513f9dd4cd5aa1f0b6cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 823, + 292, + 900, + 334 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a312e96e807c99c7975509f3cc30a8650cb5647a303a336f39244b88fb17a222.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 338, + 671, + 378 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/555602258ffdfb0cda0578a5201c1603ada5da1407bdc869aa0c325320e39050.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 337, + 750, + 378 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fb4f0cc86bc8fa166e3d2d93309b4d2f49a2774966f93cbc596b8fc90285bfc2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 750, + 337, + 900, + 378 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f42004728503f31551f10e09916551225730c7ae2579b188ed67c1f60bb2c337.jpg", + "image_caption": [ + "Figure 4. Visualization results of Pixel-SAIL on diversity tasks. Best view it in color and zoom in. From top to bottom are visual prompt-based object caption, single/multi-object referring segmentation, vision-text referring segmentation, image caption and QA, and visual-prompt based conversation. Visual prompts in the form of points and boxes are converted into mask prompts using SAM [26]. 
For more visualization results and comparisons with other MLLMs, please refer to the appendix." + ], + "image_footnote": [], + "bbox": [ + 517, + 378, + 650, + 431 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1748944d684d603bdbb18ef6a58cc5f55b83c868283d1ba3cb803734338aa333.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 378, + 750, + 431 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8c0aa32b9248a9481769b0b1ab82aaee0bd5d496ea09b8a99583223e7dc02225.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 750, + 378, + 834, + 433 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8b9e4c68f6ea43f611dad57e5712dd70f8478ec89248fbfbeb3f59bd966df9c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 834, + 378, + 895, + 431 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5a3cb155b0ff4d71d1da04b93abb3ec77b64982962df8f4621fb90b63ba49b49.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 555, + 614, + 595 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c9e0341d3155c0159d45850ef3883c1f855c59deeb7dcf71f646e5a702932d8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 603, + 614, + 654 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/82720d858e972b556426d0d7a59e765573479959ded0e27c4c7c0c3bcaa4ae1e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 555, + 707, + 655 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/576a8003516e688ddfec0f59b4c386ac5993c297f63a6a14bea084ff9ff5f7df.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 709, + 555, + 803, + 604 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cec28f82f78a1ff66461359b6cf56928057d0bb8f10e5c03c2af24f3188d7bfc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 805, + 555, + 898, + 604 + ], + 
"page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ddd809653d8358f3fa9d942f2ed362b8eb1d49d2406959486c808b219fa5bd1b.jpg", + "image_caption": [ + "Figure 5. Image feature visualization results. From left to right are the image feature of the base MLLM, the image feature of Pixel-SAIL, and the mask feature of Pixel-SAIL." + ], + "image_footnote": [], + "bbox": [ + 517, + 656, + 614, + 704 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/956d0f979008f483a5008e483c6d996e4b1db14407264f0f0477c843be8c3d9a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 656, + 707, + 705 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/80c579cddfe2ed559771ad16801a79afb29c49427d171ce49295ed1addfbe13c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 656, + 803, + 705 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/51a610628940f7450f2cce02aab7c97bd296dcf6d63c3709ae587ebcc9940c11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 805, + 656, + 897, + 705 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Ablation Studies", + "text_level": 1, + "bbox": [ + 511, + 763, + 678, + 779 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effectiveness of Each Component. We conduct comprehensive ablation studies on the proposed components, with results presented in Tab. 5. Our plain baseline, trained with LLaVA-665k and RefCOCO+/g data, achieves only 64.5, 57.3, and 60.1 cIoU on the RefCOCO, RefCOCO+, and RefCOCOg datasets, respectively. Moreover, this baseline completely fails on the visual prompt understanding task, attaining merely 1.0 METEOR on the region caption task. 
Upon incorporating the learnable upsampling mod", + "bbox": [ + 511, + 787, + 906, + 924 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ule, segmentation quality improves dramatically, with the model reaching 76.2, 69.6, and $73.8\\mathrm{cIoU}$ on RefCOCO, RefCOCO+, and RefCOCOg. However, the model still cannot effectively interpret user-input visual prompts due to insufficient semantic information in the object representation. When we scale up the training data by introducing substantial amounts of segmentation data and visual-prompt understanding data, the model's segmentation capabilities are further enhanced. Despite scaling the training data, the model continues to struggle with visual prompt inputs because of the limited semantic information in the object representation. After implementing our proposed visual prompt injection mechanism, the model demonstrates significant improvements in visual prompt understanding, achieving 16.1 METEOR on the region caption task. Interestingly, we observe that enhanced visual prompt understanding capabilities positively influence referring segmentation performance. Finally, incorporating the distillation strategy further refines the model's detailed segmentation quality.", + "bbox": [ + 89, + 114, + 485, + 416 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation on Various MLLMs. To demonstrate the effectiveness of Pixel-SAIL, we validate across different architectures and sizes, with results shown in Tab. 6. To reduce training costs, we use only LLaVA-665k and RefCOCO $+ / \\mathrm{g}$ data for training and evaluate on the referring segmentation task. When using our modified 0.5B SOLO as the base MLLM, Pixel-SAIL achieves cIoU scores of 69.7, 62.5, and 65.3 on RefCOCO $+ / \\mathrm{g}$ . 
When scaling the model size to 3B, Pixel-SAIL's performance improves by 3.5, 3.9, and 3.8 cIoU on RefCOCO $+ / \\mathrm{g}$ . When using EVEv2-7B as the base MLLM, despite the attention between vision tokens changing from full attention to causal attention and the architecture transitioning to an MOE architecture, Pixel-SAIL achieves cIoU scores of 77.4, 70.4, and 75.2 on RefCOCO $+ / \\mathrm{g}$ , demonstrating that performance consistently increases with model scaling.", + "bbox": [ + 89, + 419, + 483, + 661 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation on Data Scaling. Data plays a crucial role in the performance of Pixel-SAIL. As shown in Tab. 7, we conduct comprehensive ablation studies on the training data to evaluate its impact. When trained solely with basic data (including LLaVA-665k and RefCOCO+/g datasets), Pixel-SAIL achieves 69.7, 62.5, and 65.3 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively. Upon scaling the segmentation-related data, Pixel-SAIL demonstrates significant performance improvements of 6.5, 7.1, and 8.5 cIoU on these datasets. Furthermore, incorporating visual prompt data for mixed training not only enhances the model's visual prompt understanding capabilities but also yields additional performance gains of 1.2, 0.8, and 1.4 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively.", + "bbox": [ + 89, + 664, + 483, + 876 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation on Distillation Strategy. Distillation is a highly effective method for infusing knowledge into Pixel-SAIL. We conduct ablation studies on the distillation strategy, and", + "bbox": [ + 89, + 878, + 483, + 925 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the results are presented in Tab. 8. We use the average cIoU across all splits as the evaluation metric. 
When only Mask2Former [12] is employed to distill high-resolution mask features, Pixel-SAIL achieves performance gains of 0.2, 0.5, and 0.3 on RefCOCO $+ / \\mathrm{g}$ . When SAM2 [53] is used to distill low-resolution image features, Pixel-SAIL obtains performance improvements of 0.3, 0.4, and 0.4 on RefCOCO $+ / \\mathrm{g}$ . When both teacher models are utilized collaboratively, performance gains of 0.6, 0.3, and 0.5 are achieved. Additionally, the extra computational cost introduced by the distillation strategy is minimal, increasing the training time by only about $5\\%$ for Pixel-SAIL-0.5B.", + "bbox": [ + 511, + 114, + 906, + 297 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Visualization Analysis", + "text_level": 1, + "bbox": [ + 511, + 305, + 720, + 320 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Visual Comparison. In Fig. 4, we showcase Pixel-SAIL's visualization results on diverse tasks. Pixel-SAIL flexibly interprets both visual prompts and text instruction inputs, responding with text and segmentation masks.", + "bbox": [ + 511, + 325, + 906, + 387 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Visual Affinity Map Analysis. We use PCA dimensionality reduction algorithm to visualize vision features, with results shown in Fig. 5. Our Pixel-SAIL's image features (3rd column) are denser and more diverse compared to the base MLLM's image features (2nd column). Pixel-SAIL's mask features, after the upsampling module, are denser and have better segmentation edges. Interestingly, Pixel-SAIL's image features (more focused on understanding, combining factors such as categories, colors, positions, etc.) exhibit different characteristics from mask features (more focused on perception, categories, and instances). 
As seen in the second row's third and fourth columns, the cars on the left and right have relatively distant feature representations in the image features, while they are very close in the mask features.", + "bbox": [ + 511, + 387, + 908, + 613 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 626, + 633, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We explore the simplest architecture for pixel-grounded understanding tasks. In particular, we present Pixel-SAIL, which extends current SAIL-like MLLM for fine-grained understanding with three technical improvements (learnable upsampling module, new visual prompt encoding, and segmentor feature distillation). For the first time, our work proves that even without extra visual experts (visual encoder, segmentation models), one single transformer can still achieve stronger performance on four public referring segmentation benchmarks. We further introduce a more challenging benchmark, Perbench, to promote the development of pixel-MLLM community.", + "bbox": [ + 511, + 652, + 908, + 833 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitation and Future Work. Our work provides the simplest solution for pixel-grounded tasks. However, one limitation is that we only adopt 1.7M data for co-training. We will further explore Pixel-SAIL on more data (for example, billion-level masks along with visual prompts [26]) for cotraining.", + "bbox": [ + 511, + 833, + 908, + 924 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 113, + 187, + 128 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. 
Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966, 2023.", + "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.", + "[3] Satanjeev Banerjee and Alon Lavie. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. ACL, 2005.", + "[4] Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, and Yong Jae Lee. Making large multimodal models understand arbitrary visual prompts. In CVPR, 2024.", + "[5] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internl m2 technical report. arXiv preprint arXiv:2403.17297, 2024.", + "[6] Lin Chen, Jisong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. arXiv preprint arXiv:2311.12793, 2023.", + "[7] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024.", + "[8] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. TMLR, 2024.", + "[9] Yi-Chia Chen, Wei-Hua Li, Cheng Sun, Yu-Chiang Frank Wang, and Chu-Song Chen. Sam4mllm: Enhance multimodal large language model for referring expression segmentation. ECCV, 2024.", + "[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. 
arXiv preprint arXiv:2412.05271, 2024.", + "[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In CVPR, 2024.", + "[12] Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In CVPR, 2022.", + "[13] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019.", + "[14] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free" + ], + "bbox": [ + 93, + 138, + 480, + 922 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "vision-language models. arXiv preprint arXiv:2406.11832, 2024.", + "[15] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. NeurIPS, 2025.", + "[16] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Eve2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025.", + "[17] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021.", + "[18] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In ACMMM, 2024.", + "[19] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. 
In CVPR, 2021.", + "[20] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, Yunsheng Wu, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023.", + "[21] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022.", + "[22] Louis Martin Hugo Touvron, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, and et al. Llama 2: Open foundation and fine-tuned chat models. arXiv:2307.09288, 2023.", + "[23] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In ECCV, 2020.", + "[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021.", + "[25] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014.", + "[26] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. ICCV, 2023.", + "[27] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In CVPR, 2024.", + "[28] Weixian Lei, Jiacong Wang, Haochen Wang, Xiangtai Li, Jun Hao Liew, Jiashi Feng, and Zilong Huang. The scalability of simplicity: Empirical analysis of vision-language learning with a single transformer. arXiv, 2025.", + "[29] Bohao Li, Yuying Ge, Yixiao Ge, Guangzhi Wang, Rui Wang, Ruimao Zhang, and Ying Shan. 
Seed-bench: Benchmarking multimodal large language models. In CVPR, 2024." + ], + "bbox": [ + 516, + 114, + 903, + 922 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 946, + 504, + 958 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024.", + "[31] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, 2022.", + "[32] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023.", + "[33] Xiangtai Li, Haobo Yuan, Wei Li, Henghui Ding, Size Wu, Wenwei Zhang, Yining Li, Kai Chen, and Chen Change Loy. Omg-seg: Is one model good enough for all segmentation? In CVPR, 2024.", + "[34] Xiaotong Li, Fan Zhang, Haiwen Diao, Yueze Wang, Xinlong Wang, and Ling-Yu Duan. Densefusion-1m: Merging vision experts for comprehensive multimodal perception. arXiv preprint arXiv:2407.08303, 2024.", + "[35] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. ECCV, 2022.", + "[36] Chen Liang, Wenguan Wang, Tianfei Zhou, Jiaxu Miao, Yawei Luo, and Yi Yang. Local-global context aware transformer for language-guided video segmentation. arXiv preprint arXiv:2203.09773, 2022.", + "[37] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014.", + "[38] Weifeng Lin, Xinyu Wei, Ruichuan An, Peng Gao, Bocheng Zou, Yulin Luo, Siyuan Huang, Shanghang Zhang, and Hongsheng Li. 
Draw-and-understand: Leveraging visual prompts to enable mllms to comprehend what you want. arXiv preprint arXiv:2403.20271, 2024.", + "[39] Chang Liu, Henghui Ding, and Xudong Jiang. GRES: Generalized referring expression segmentation. In CVPR, 2023.", + "[40] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023.", + "[41] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, 2024.", + "[42] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. *Polyformer: Referring image segmentation as sequential polygon generation*. *CVPR*, 2023.", + "[43] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In ECCV, 2024.", + "[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint, 2017.", + "[45] Timo Lüddecke and Alexander Ecker. Image segmentation using text and image prompts. In CVPR, 2022.", + "[46] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the" + ], + "bbox": [ + 91, + 114, + 483, + 922 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "boundaries of monolithic multimodal large language models with endogenous visual pre-training. CVPR, 2025.", + "[47] Chuofan Ma, Yi Jiang, Jiannan Wu, Zehuan Yuan, and Xiaojuan Qi. Groma: Localized visual tokenization for grounding multimodal large language models. In ECCV, 2024.", + "[48] Shehan Munasinghe, Hanan Gani, Wenqi Zhu, Jiale Cao, Eric Xing, Fahad Shahbaz Khan, and Salman Khan. Videoglamm: A large multimodal model for pixel-level visual grounding in videos. 
arXiv preprint arXiv:2411.04923, 2024.", + "[49] Lu Qi, Yi-Wen Chen, Lehan Yang, Tiancheng Shen, Xiangtai Li, Weidong Guo, Yu Xu, and Ming-Hsuan Yang. Generalizable entity grounding via assistance of large language model. arXiv preprint arXiv:2402.02555, 2024.", + "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021.", + "[51] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M. Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S. Khan. Glamm: Pixel grounding large multimodal model. In CVPR, 2024.", + "[52] Jeff Rasley, Samyam Rajbhandari, Olatunj Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In SIGKDD, 2020.", + "[53] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024.", + "[54] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In CVPR, 2024.", + "[55] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024.", + "[56] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and efficient foundation language models. 
arXiv:2302.13971, 2023.", + "[57] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. CVPR, 2015.", + "[58] Jiaqi Wang, Pan Zhang, Tao Chu, Yuhang Cao, Yujie Zhou, Tong Wu, Bin Wang, Conghui He, and Dahua Lin. V3det: Vast vocabulary visual detection dataset. In ICCV, 2023.", + "[59] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022." + ], + "bbox": [ + 516, + 114, + 903, + 922 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[60] Cong Wei, Haoxian Tan, Yujie Zhong, Yujiu Yang, and Lin Ma. LaSagnA: Language-based segmentation assistant for complex queries. arXiv preprint arXiv:2404.08506, 2024.", + "[61] Jianzong Wu, Xiangtai Li, Shilin Xu, Haobo Yuan, Henghui Ding, Yibo Yang, Xia Li, Jiangning Zhang, Yunhai Tong, Xudong Jiang, Bernard Ghanem, and Dacheng Tao. Towards open vocabulary learning: A survey. arXiv pre-print, 2023.", + "[62] Size Wu, Sheng Jin, Wenwei Zhang, Lumin Xu, Wentao Liu, Wei Li, and Chen Change Loy. F-lmm: Grounding frozen large multimodal models. CVPR, 2025.", + "[63] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In CVPR, 2024.", + "[64] Hu Xu, Saining Xie, Xiaoqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. 
arXiv preprint arXiv:2309.16671, 2023.", + "[65] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024.", + "[66] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.", + "[67] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022.", + "[68] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022.", + "[69] Zuyao You, Junke Wang, Lingyu Kong, Bo He, and Zuxuan Wu. Pix2cap-coco: Advancing visual comprehension via pixel-level captioning. arXiv preprint arXiv:2501.13893, 2025.", + "[70] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In ECCV, 2016.", + "[71] Haobo Yuan, Xiangtai Li, Chong Zhou, Yining Li, Kai Chen, and Chen Change Loy. Open-vocabulary sam: Segment and recognize twenty-thousand classes interactively. 
arXiv preprint, 2024.", + "[72] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and" + ], + "bbox": [ + 91, + 114, + 483, + 924 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025.", + "[73] Haobo Yuan, Tao Zhang, Xiangtai Li, Lu Qi, Zilong Huang, Shilin Xu, Jiashi Feng, and Ming-Hsuan Yang. 4th pvuw mevis 3rd place report: Sa2va. arXiv preprint arXiv:2504.00476, 2025.", + "[74] Yuqian Yuan, Wentong Li, Jian Liu, Dongqi Tang, Xinjie Luo, Chi Qin, Lei Zhang, and Jianke Zhu. Osprey: Pixel understanding with visual instruction tuning. In CVPR, 2024.", + "[75] Alireza Zareian, Kevin Dela Rosa, Derek Hao Hu, and Shih-Fu Chang. Open-vocabulary object detection using captions. In CVPR, 2021.", + "[76] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023.", + "[77] Pan Zhang, Xiaoyi Dong, Bin Wang, Yuhang Cao, Chao Xu, Linke Ouyang, Zhiyuan Zhao, Shuangrui Ding, Songyang Zhang, Haodong Duan, Wenwei Zhang, Hang Yan, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer: A vision-language large model for advanced text-image comprehension and composition. arXiv preprint arXiv:2309.15112, 2023.", + "[78] Tao Zhang, Xingye Tian, Yu Wu, Shunping Ji, Xuebo Wang, Yuan Zhang, and Pengfei Wan. DVIS: Decoupled video instance segmentation framework. In ICCV, 2023.", + "[79] Tao Zhang, Xingye Tian, Yikang Zhou, Shunping Ji, Xuebo Wang, Xin Tao, Yuan Zhang, Pengfei Wan, Zhongyuan Wang, and Yu Wu. Dvis++: Improved decoupled framework for universal video segmentation. 
arXiv preprint arXiv:2312.13305, 2023.", + "[80] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Change Loy Chen, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. In NeurIPS, 2024.", + "[81] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In CVPR, 2024.", + "[82] Yikang Zhou, Tao Zhang, Shilin Xu, Shihao Chen, Qianyu Zhou, Yunhai Tong, Shunping Ji, Jiangning Zhang, Xiangtai Li, and Lu Qi. Are they the same? exploring visual correspondence shortcomings of multimodal llms. arXiv preprint arXiv:2501.04670, 2025." + ], + "bbox": [ + 516, + 114, + 903, + 736 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding", + "text_level": 1, + "bbox": [ + 153, + 108, + 843, + 131 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 382, + 142, + 614, + 162 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We first present more details on training and testing of our Pixel-SAIL in Sec. 6. Then, we present the detailed benchmark building process, in Sec. 7 and more challenging examples in PerBench in Sec. 8. Next, we present more comparison with current state-of-the-art pixel-grounded MLLMs, in Sec. 9.", + "bbox": [ + 89, + 180, + 482, + 270 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6. More Detailed Training and Testing", + "text_level": 1, + "bbox": [ + 89, + 287, + 413, + 306 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Training. We will present more details about the training, including dataset sampling specifications and distillation methodology. 
For the RefCOCO series [25, 70] datasets, we randomly sample 5 referring expressions per image and organize them into a multi-round dialogue format as a single training data point, processing all images for four epochs. For COCO [37] data, we sample 5 categories per image and randomly select either instance mode or semantic mode to structure the responses. In instance mode, objects are arranged by their center points from left to right. We process the COCO dataset for one epoch. For Pixel2Cap [69], our generated detailed object caption data, and Osprey [74] object description data, we randomly sample 1-5 visual prompts per image and randomly incorporate questions about non-existent visual prompts, with responses indicating that these visual prompts do not exist. These object caption datasets are processed for five epochs. For other segmentation-related or visual prompt-related data, we conduct one epoch. For LLaVA-665k, we randomly sample at a 1:1 ratio alongside other data for joint training to ensure that the base MLLM's instruction-following capability remains intact.", + "bbox": [ + 91, + 315, + 482, + 646 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "When the length of input tokens (including the length of vision tokens) exceeds 8192, we truncate the excess portion. For the 0.5B model, we use DeepSpeed Zero-1 [52] for training, and for the 3B and 7B models, we use DeepSpeed Zero-2 [52] for training.", + "bbox": [ + 89, + 648, + 482, + 724 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We distill the mask features generated by the Mask2Former [12] pixel decoder and the lowest resolution features generated by the SAM2 [53] image encoder onto the upsampled mask features from Pixel-SAIL and the image features directly reshaped from vision tokens, respectively. We use bilinear interpolation to align spatial dimensions and implement a learnable linear layer to align the channel size. 
The distillation process employs MSE loss with a weight of 0.5.", + "bbox": [ + 89, + 726, + 482, + 861 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Testing. We have elaborated on the testing details of Pixel-SAIL on pixel-grounded benchmarks in the main text. For general image question answering benchmarks, we follow the prompt settings of the base MLLMs and use", + "bbox": [ + 89, + 863, + 482, + 922 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "VLMEvalKit [18] for evaluation, without using additional LLM assistance to identify answers.", + "bbox": [ + 511, + 180, + 903, + 210 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "7. More Detailed Process on Benchmarking Building", + "text_level": 1, + "bbox": [ + 511, + 223, + 903, + 258 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The construction of PerBench combines an automated model-generated pipeline with manual screening, correction, and annotation. The process is divided into three stages.", + "bbox": [ + 511, + 266, + 903, + 325 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The first stage involves annotating detailed object captions. We crop objects and draw visual prompts on the original images to prompt InternVL-2.5 78B [10] and Qwen-VL 2.5 72B [2] to generate detailed captions for the objects. These captions are then cross-validated using Qwen2.5 72B [66]. If all captions are consistent, they are integrated using an LLM; otherwise, the data are discarded. After the model automatically generates the detailed object captions, we manually select and correct 500 of them to form the final 500 detailed object caption data points in the benchmark.", + "bbox": [ + 511, + 327, + 903, + 477 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The second stage focuses on annotating visual-prompt question-answering data in an MCQ (Multiple Choice Question) format. 
In this phase, we manually generate a multiple-choice question for each object caption obtained from the first stage. After completing the annotations, two quality control specialists perform cross-verification to identify and rectify any potential errors.", + "bbox": [ + 511, + 479, + 903, + 583 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The final stage contains the annotation of visual-text referring segmentation data. At this stage, we manually select and annotate object segmentation masks, referring visual prompts, and text from SAM images. During the annotation process, we consider various factors such as positional relationships, event relationships, appearance, size, and more, including cases with both single and multiple visual prompts. Once the annotation is complete, two individuals review it, and correct the errors.", + "bbox": [ + 511, + 584, + 903, + 717 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "8. More Challenging Cases in PerBench", + "text_level": 1, + "bbox": [ + 511, + 732, + 848, + 750 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We present more detailed object caption samples from our PerBench in Fig. 6. The objects are derived from diverse senses and categories, encompassing humans, man-made objects, and natural landscapes. The object captions include basic categories, attributes, purposes, and relationships with surrounding objects. This high-quality benchmark will effectively advance the development of the visual prompt understanding community.", + "bbox": [ + 511, + 757, + 903, + 877 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "More referring segmentation samples are illustrated in Fig. 7. 
Our manually annotated samples cover a variety of scenes, such as indoor and outdoor settings, and include", + "bbox": [ + 511, + 878, + 903, + 922 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 946, + 503, + 958 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/54f00460d44c59b795de71e7634a25e6d01f0a7ce2b703a11af4760339be0174.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 113, + 276, + 229 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a person standing behind a wooden podium covered with a blue cloth, addressing an audience outdoors. The podium has an emblem, and the person is dressed in a dark blue jacket with a logo, a scarf, and a white shirt. The audience includes people seated on a stool and a folding chair, with trees, parked cars, and a building with large windows visible in the background.", + "bbox": [ + 277, + 116, + 509, + 267 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/26dbd4c246279b4fc314092ff22dad26f9b64219d0101f942ada95ce6e793161.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 113, + 616, + 229 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a bronze statue of a young boy with exaggerated features. The statue is dressed in a formal outfit including a suit jacket, vest, and shorts, and it wears flip-flops. The boy holds a small object resembling a ball in his right hand. The statue's whimsical and playful appearance, characterized by exaggerated proportions and a sense of movement, is set on a rock-like structure in an urban street scene with a pedestrian crossing visible in the background.", + "bbox": [ + 617, + 117, + 883, + 280 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a Smart Fortwo, a compact city car known for its small size and maneuverability. 
This car features predominantly white with green accents, including green wheels and trim. Designed for urban environments with limited parking space, it is parked on a sidewalk next to a charging station, indicating its electric vehicle status. The charging cable is connected, suggesting it is currently being charged. The surrounding area includes a paved road with multiple lanes and a grassy area separated by a curb.", + "bbox": [ + 630, + 280, + 892, + 455 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/2186d7b541c3ac8b0b1e6f888aabb7d0139e70b68d4899e20fc2e63d36c16d9c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 284, + 251, + 449 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a person dressed in black and blue attire, standing on skis in a snowy outdoor setting, preparing for or participating in a skiing event. This person is wearing a black beanie, a black puffy jacket, black gloves, black pants, and a blue bib with the number \"1\" in red. They also have ski poles and ski boots. In the background, there are other people skiing or standing around, along with parked cars and trees, indicating a recreational or competitive skiing area.", + "bbox": [ + 254, + 281, + 478, + 458 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/52758840947ce05f90ecc252c8b5c5818b4964a9f94be12806665b38888ffaa2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 480, + 314, + 629, + 398 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/3a6e35850e62808d80931bfa589b802dcb84f7e252ccaf4415e54f89948ea1b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 106, + 460, + 254, + 630 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a stack of white bangles adorned with small, colorful stones. 
These bangles, part of traditional jewelry, are worn on the arm of a person dressed in vibrant traditional attire, including a yellow scarf with colorful patterns and a floral-patterned garment, complementing the overall colorful and cultural appearance.", + "bbox": [ + 254, + 464, + 442, + 626 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/7300f72a106255187cf4c86e7b5a5dc72806c38f843ff35d4057d0d97f0a0ca6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 447, + 497, + 627, + 592 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a waterfall cascading down a steep, rocky cliff. The waterfall serves as the central focus, with water flowing smoothly from top to bottom. The area around the waterfall features rugged, uneven rock surfaces and patches of green vegetation. The cliff is part of a larger mountainous or hilly terrain, with dense foliage at the top. The waterfall's flow and movement create a striking contrast against the dark, textured rocks, highlighting the natural beauty of the scene.", + "bbox": [ + 629, + 460, + 890, + 619 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/25c68c43191bca0b4140046d5bba17dabfcc96d22a4b3288b17449104cd462b6.jpg", + "image_caption": [ + "Figure 6. More visualization examples of detailed object captions from our PerBench." + ], + "image_footnote": [], + "bbox": [ + 102, + 660, + 305, + 767 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a black shoulder bag with a flap closure. 
It is made of a synthetic or leather-like material, has a smooth texture and sheen, and is carried over the shoulder of a person walking in a grassy area near a wooden fence and a small white stool, dressed in a black top and pink shorts.", + "bbox": [ + 308, + 633, + 459, + 806 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9440ef894fa079e5258e6dfd1b6a1c3ddfd1826198e0db08f174e3db7c0242e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 468, + 651, + 655, + 746 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The object is a navigational buoy used in maritime environments. It is a cylindrical structure with a pointed top, primarily white in color, featuring a pink top and a distinctive band. Mounted on a black base, it is situated in a body of water such as a harbor or marina, surrounded by other boats and buoys. Its function is to guide vessels through waterways, aiding navigation with its unique color pattern. This buoy is part of a larger maritime setting, often near populated areas or popular boating destinations.", + "bbox": [ + 656, + 619, + 895, + 806 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "objects of multiple granularities. The referring text encompasses positional relationships, event relationships, and more. This new task is more challenging than current pure text referring segmentation tasks.", + "bbox": [ + 89, + 859, + 482, + 921 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Q: Please segment which object $\\langle \\text{vp\\_0} \\rangle$ is using. A: [SEG] Q: Please segment the $\\langle \\text{vp\\_0} \\rangle$ that is sitting. 
A: [SEG]", + "bbox": [ + 104, + 111, + 880, + 126 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/637e2c6c160d921c8e8eb7a912d02ef64fb5d5f496a09547497acdb12cb96449.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 127, + 501, + 232 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/b3cba3595b4d3ca756e3999aab583c70696f7b48d9f22af4691894b857494ece.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 127, + 887, + 232 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Q: Please segment where will arrive. A: [SEG]", + "bbox": [ + 104, + 252, + 472, + 266 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Q: Please segment the object that blocks the sunlight for $<\\mathrm{vp\\_0}$ . A: [SEG]", + "bbox": [ + 506, + 239, + 826, + 267 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/c68060ea861a6cbea6396bbf326c799ca8df216d62e6c88c6bcb08edd708f8c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 267, + 501, + 371 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a18d58ffa3b9e5997496a5e5cb52fc23eae362b5fdf646c42c3a31b431a2d3c0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 267, + 893, + 371 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Q: Please segment the letter closest to . \nA: [SEG]", + "bbox": [ + 104, + 372, + 437, + 398 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Q: Please segment the object that is enjoying. 
\nA: [SEG]", + "bbox": [ + 504, + 372, + 879, + 398 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/21d42f32578b84b87baec4d382d3e198b1d4bb07238b436c05ce0aa66b7c57da.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 400, + 500, + 503 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/29ce58b437a5916dea640e00806154a31abae29e90df52edf2d67809c927bd7a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 401, + 895, + 503 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Q: Please segment the person who is holding $\\langle \\text{vp\\_0} \\rangle$ . A: [SEG]", + "bbox": [ + 104, + 511, + 359, + 537 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Q: Please segment the tree between $\\langle \\text{vp}_0 \\rangle$ and $\\langle \\text{vp}_1 \\rangle$ . A: [SEG]", + "bbox": [ + 390, + 521, + 839, + 537 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/88b02944cee3f7b4a58e0b3ffd50ae70f9295df610d6fcc16d105ed2bf1d5cd1.jpg", + "image_caption": [ + "Figure 7. More visualization examples of vision-text referring segmentation from our PerBench." + ], + "image_footnote": [], + "bbox": [ + 102, + 539, + 344, + 679 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/cff4a47f203768f0842f1df33e93ff0f70dc45da26e5754a5f9e6cd0db753944.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 374, + 545, + 890, + 679 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "9. More Comparison With SOTA Pixel-Grounded MLLM", + "text_level": 1, + "bbox": [ + 89, + 731, + 482, + 763 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We conduct a qualitative comparative analysis with the SOTA pixel-grounded MLLM, Sa2VA [72], and present the visualization results in Fig. 8. We observe that both Pixel-SAIL and Sa2VA achieve excellent results in most cases. 
However, Sa2VA performs significantly weaker than Pixel-SAIL in certain scenarios, despite utilizing the much more powerful InternVL2.5 [10] compared to our base encoder-free MLLM [8]. In the left examples, Sa2VA performs notably worse than Pixel-SAIL in multi-object segmentation", + "bbox": [ + 89, + 787, + 483, + 924 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "tasks. Additionally, in the right example, Sa2VA demonstrates significantly weaker attention to non-core areas of the image, such as edges, compared to Pixel-SAIL, leading to frequent failures in segmenting objects near image boundaries.", + "bbox": [ + 511, + 733, + 906, + 808 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 946, + 503, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Please segment the two women.", + "bbox": [ + 99, + 441, + 158, + 494 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Please segment all the mans in right side.", + "bbox": [ + 101, + 512, + 158, + 594 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/c6d0d68534eafff67d8719670cfe877faf018304f100380c905e6b410dd5f80d.jpg", + "image_caption": [ + "Pixel-SAIL" + ], + "image_footnote": [], + "bbox": [ + 163, + 417, + 330, + 505 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/adbf3fb6b7b5c39d5eb61a072abe5dc979e86526a712b7224538858be92e6be9.jpg", + "image_caption": [ + "Sa2VA" + ], + "image_footnote": [], + "bbox": [ + 331, + 417, + 500, + 505 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Please segment the flower.", + "bbox": [ + 504, + 416, + 555, + 494 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/1670b5aa11ea16ec862993f96f4c48d8d423050174dfe97332d793ccceb7b98f.jpg", + "image_caption": [ + "Figure 8. Visualization Comparison of Sa2Va and Pixel-SAIL." 
+ ], + "image_footnote": [], + "bbox": [ + 163, + 510, + 331, + 597 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/aebf36c684ad3f3508655df02db8926f2ddf1297a2aafa83ac26c6da0953d95d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 510, + 500, + 597 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Please segment the person sitting on the floor.", + "bbox": [ + 504, + 508, + 553, + 604 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/86b860bdc87c86e23424a603ebb6acb9c8d1a465e1f1714a657528991691867d.jpg", + "image_caption": [ + "Pixel-SAIL" + ], + "image_footnote": [], + "bbox": [ + 557, + 417, + 725, + 505 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/4617a9ba01a62e1555c8eda6e19463c940cd456014879986d1c05a09455702ec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 725, + 417, + 895, + 505 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/5b4146250237eff7028b009970b91b0f8647976b0af9572a8aa9a1d2312d4f3c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 557, + 510, + 725, + 599 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/0b7ffde63325f0b51fb4beebc982298a3c7a6c5d44f1cdeb9a7612bf4dab0654.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 725, + 510, + 895, + 599 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 958 + ], + "page_idx": 14 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_model.json b/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a0ab32773ca7fa9a82150e369580737fbdae09a8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_model.json @@ -0,0 +1,3387 @@ +[ + [ + { + "type": "header", + "bbox": [ + 
0.091, + 0.089, + 0.319, + 0.111 + ], + "angle": 0, + "content": "ByteDance | Seed" + }, + { + "type": "title", + "bbox": [ + 0.154, + 0.131, + 0.843, + 0.154 + ], + "angle": 0, + "content": "Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding" + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.179, + 0.834, + 0.234 + ], + "angle": 0, + "content": "Tao Zhang\\(^{1,2}\\) Xiangtai Li\\(^{1}\\) Zilong Huang\\(^{1}\\) Yanwei Li\\(^{1}\\) Weixian Lei\\(^{1}\\) Xueqing Deng\\(^{1}\\) Shihao Chen\\(^{2}\\) Shunping Ji\\(^{2}\\) Jiashi Feng\\(^{1}\\) Bytedance Seed\\(^{2}\\) WHU" + }, + { + "type": "text", + "bbox": [ + 0.22, + 0.236, + 0.796, + 0.253 + ], + "angle": 0, + "content": "Project Page: https://zhang-tao-whu.github.io/project/pixelsail" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.283, + 0.378, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.125, + 0.441, + 0.352, + 0.453 + ], + "angle": 0, + "content": "(a), Multi-modal Fusion with extra decoders" + }, + { + "type": "image", + "bbox": [ + 0.409, + 0.27, + 0.648, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.434, + 0.441, + 0.632, + 0.454 + ], + "angle": 0, + "content": "(b), MLLM with segmentation experts" + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.285, + 0.857, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.677, + 0.441, + 0.9, + 0.454 + ], + "angle": 0, + "content": "(c), Pixel SAIL with one single transformer" + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.465, + 0.908, + 0.508 + ], + "angle": 0, + "content": "Figure 1. Comparison of current MLLMs for pixel-wise understanding with our method. (a) and (b). 
Current MLLMs for pixel-wise understanding feature highly complex system architectures, including an LLM, a CLIP-like vision backbone, an object token extraction model, a segmentation vision backbone, and a SAM-like decoder. (c). Our method employs only a single transformer." + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.522, + 0.327, + 0.537 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.555, + 0.485, + 0.903 + ], + "angle": 0, + "content": "Multimodal Large Language Models (MLLMs) achieve remarkable performance for fine-grained pixel-level understanding tasks. However, all the works rely heavily on extra components, such as vision encoder (CLIP), segmentation experts, leading to high system complexity and limiting model scaling. In this work, our goal is to explore a highly simplified MLLM without introducing extra components. Our work is motivated by the recent works on Single trTransformer as a unified vVision-Language Model (SAIL) design, where these works jointly learn vision tokens and text tokens in transformers. We present Pixel-SAIL, a single transformer for pixel-wise MLLM tasks. In particular, we present three technical improvements on the plain baseline. First, we design a learnable upsampling module to refine visual token features. Secondly, we propose a novel visual prompt injection strategy to enable the single transformer to understand visual prompt inputs and benefit from the early fusion of visual prompt embeddings and vision tokens. Thirdly, we introduce a vision expert distillation strategy to efficiently enhance the single transformer's fine-grained feature extraction capability. In addition, we have collected a comprehensive pixel understanding benchmark (PerBench), using a manual check. 
It includes three tasks:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.523, + 0.907, + 0.644 + ], + "angle": 0, + "content": "detailed object description, visual prompt-based question answering, and visual-text referring segmentation. Extensive experiments on four referring segmentation benchmarks, one visual prompt benchmark, and our PerBench show that our Pixel-SAIL achieves comparable or even better results with a much simpler pipeline. Code and model will be released at https://github.com/magicresearch/Sa2VA." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.651, + 0.645, + 0.665 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.676, + 0.907, + 0.812 + ], + "angle": 0, + "content": "Multi-modal Large Language Models (MLLMs) have garnered significant research efforts, driven by advancements of Large Language Models (LLMs) [22, 56, 65]. While most studies focus on open-ended visual question answering tasks, there is a growing interest [51, 80] in fine-grained, pixel-level understanding. This enables broader applications, such as facilitating precise region-level editing and generation and achieving precise understanding of designated mask regions." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.812, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Recent pixel-wise MLLMs [27, 51, 54, 63, 72, 80, 81] mainly adopt visual and language fusion frameworks, following design patterns [17, 42, 68] established before the LLM era. For example, LAVIT [68] adopts encoder-fusion approach, injecting language embedding (generated by BERT [13]) into vision transformers. 
With the advent of" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.262, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.10465v1 [cs.CV] 14 Apr 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.114, + 0.482, + 0.265 + ], + "angle": 0, + "content": "LLMs [22, 65, 66], recent works [27, 54, 72, 80] integrate state-of-the-art segmentation models [26, 33, 53], for pixel-level understanding, by either appending them to LLM outputs or embedding LLM within segmentation pipelines. While effective, the overall architectures are complex, requiring specialized components such as vision-language fusion modules and additional decoders. Moreover, their final performance often heavily depends on either MLLMs or the segmentation models, which may lead to suboptimal results due to limitations within individual submodules." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.268, + 0.482, + 0.465 + ], + "angle": 0, + "content": "In this work, we explore a novel, simple yet effective pixel-wise MLLM design, drawing inspiration from recent advancements in SAIL architecture, which is also called Encoder-free MLLMs. These methods drop the extra vision encoder and jointly co-train vision and language tokens on large scale datasets, with a simpler design. Moreover, they show competitive performance on image-level VQA tasks, compared with LLaVA. Motivated by this success, we extend the framework to pixel-level understanding tasks, aiming to reduce the complexity of existing approaches. To the best of our knowledge, this is the first study to explore the simplest architecture for pixel-wise MLLM tasks, including referring segmentation and visual prompt understanding." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.467, + 0.482, + 0.663 + ], + "angle": 0, + "content": "We first directly extend SAIL architecture by adding segmentation token and visual prompt tokens to generate segmentation masks and output region caption, following previous works [27, 51, 74]. However, this leads to inferior results on both segmentation and visual prompt understanding. Several reasons are: (1), The misalignments on high resolution features since there are no segmentation decoders since SAIL directly reshape the vision tokens into features. (2), Previous works directly adopt mask pooling on high level visual tokens where SAIL baseline only maps RGB inputs with one projection layer, where most tokens are low level features. (3), The mask quality is low since no segmentation experts are involved." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.666, + 0.482, + 0.892 + ], + "angle": 0, + "content": "To solve these problems, we present three simple technical improvements, which lead to our Pixel-SAIL framework. First, we design a simple learnable up-sampling module to refine the low resolution visual tokens in high resolution features. Our goal is to keep the design as simple as possible, where only one transposed 2D convolution is involved. Then, for visual prompt understanding, we design a novel visual prompt injection method, where we map the visual prompts into special text tokens without introducing extra visual prompt encoder in the middle stage of SAIL. Next, we propose to distill the previous segmentation experts into SAIL to improve mask quality. All the improvements are plug-in-play, and we verify the effectiveness on various SAIL architectures, including SOLO [8] and EVEv2 [16]." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.894, + 0.482, + 0.924 + ], + "angle": 0, + "content": "Then, to further indicate the effectiveness of our Pixel-SAIL and facilitate the development of pixel-LLM com" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.115, + 0.905, + 0.311 + ], + "angle": 0, + "content": "munity, we further design a new challenging benchmark, PerBench. Compared with previous pixel-wise MLLM benchmarks, we have three innovative and challenging features. First, we include a detailed object caption where most existing benchmarks only contain short captions without fine-gained contents. Secondly, we re-evaluate visual-prompt understanding as multi-choice VQA tasks following MME [20] and MMBench [43] to achieve more accurate region caption evaluation. Thirdly, we introduce a task by segmenting objects jointly referenced by visual prompts and text. Our benchmark reveals the limitation of current state-of-the-art pixel-wise MLLM on fine-grained understanding and mixed referring tasks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.312, + 0.905, + 0.493 + ], + "angle": 0, + "content": "Pixel-SAIL is jointly co-trained with mixed data engine on referring segmentation datasets, VQA datasets, and visual prompt datasets. Experimental results show that our method can achieve better results on five pixel-wise benchmarks. In particular, on RefCOCOg and RefCOCO+ datasets, our method with 3B size can outperform previous pixel MLLMs, including GLaMM (7B) and OMG-LLaVA (7B), by \\(1.5 - 3.0\\%\\) with a simpler pipeline. On our Per-Bench, our method achieves 24.2 METEOR, \\(74\\%\\) accuracy, 33.4 cIoU and 42.2 overall score, surpassing the SOTA MLLMs GLaMM (7B) and Sa2VA (4B) with overall scores of 26.9 and 3.2, respectively." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.507, + 0.655, + 0.523 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.532, + 0.905, + 0.925 + ], + "angle": 0, + "content": "Large Vision Language Models. Staring from CLIP [50] and ALIGN [24], modern vision language models have adopted contrastive learning on large-scale image-text datasets for learning vision-text aligned representations. The trained models are also proven to work well on open-vocabulary perception, such as segmentation [45, 71, 78, 79] and detection [21, 58, 61, 75]. The following works [31, 32, 64, 76] share the same network design, exploring modified loss functions and targeting data quality and filtering. Then, with the rise of large language models [5, 22, 56, 65], recent works [1, 10, 11, 40, 55, 77] mainly focus on multimodal large language models for open-ended settings, such as visual question answering or OCR benchmarks. On representative work, LLaVA [40], uses the CLIP to encode images into visual tokens and sends the visual tokens to LLMs. After that, the following works [1, 30, 41] improve designs with scaled high quality datasets, images, and videos constraining. Meanwhile, several recent works [8, 14, 16, 46] also explore the visual encoder-free designs, which jointly learn the image and text representation in a single transformer architecture. For example, SOLO [8] collects mixed language and vision datasets and trains one transformer for VQA tasks, while EVE [14] designs a CLIP supervision to enhance visual token learning. Our work follows the visual encoder-free design, and we go a step further by exploring pixel-grounded understanding tasks, including ground" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.482, + 0.16 + ], + "angle": 0, + "content": "ing tasks and visual prompt understanding. 
To our knowledge, we are the first to apply encoder-free architecture for pixel-grounded understanding tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.162, + 0.483, + 0.417 + ], + "angle": 0, + "content": "Referring Expression Segmentation. This task outputs specific masks driven by text description. Earlier works [19, 23, 36, 39, 67] explore various fusion architecture and modules to enhance text and vision feature alignments. Equipped with LLMs, several recent advanced works [27, 48, 49, 51, 63, 72, 73, 80, 82] propose more complex referring tasks, including reasoning referring or joint mask and caption generation. In particular, LISA [27] involves complex expression while GLaMM [51] annotates a new dataset and proposes region-level caption and segmentation tasks. However, all these works contain complex designs: extra vision encoders, segmentation encoders, mask decoders, and prompt encoders. Our method, Pixel-SAIL, only has one transformer to jointly learn the joint visual and language feature. With proposed data engine and improved methods, Pixel-SAIL achieves good results with much simpler architecture." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.419, + 0.483, + 0.586 + ], + "angle": 0, + "content": "Visual Prompt Understanding. Understanding visual prompts plays an important role when building interaction between VLMs and human. Recent works [4, 38, 47, 51, 74] build new visual prompt datasets for region caption generation and prompt-aware VQA tasks. ViP-LLaVA [4] overlays the visual prompts directly onto the image canvas and fine-tunes the LLaV on a specific visual prompt dataset, while Osprey [74] explores pixel-wise mask regions into language instructions. Our method can also be extended into visual prompt understanding with our proposed prompt token injection design." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.603, + 0.182, + 0.618 + ], + "angle": 0, + "content": "3. 
Method" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.628, + 0.44, + 0.644 + ], + "angle": 0, + "content": "3.1. Encoder Free MLLM and Plain Baseline" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.651, + 0.483, + 0.788 + ], + "angle": 0, + "content": "Recently, several encoder-free MLLMs [8, 15, 16, 46] achieve comparable performance with those extra vision encoders. These models jointly learn vision and text features in a single transformer, with much simpler architecture. In particular, SOLO uses a simple project layer to map the image into visual tokens and then combines language tokens as the inputs of the transformer. However, no works have explored such new architecture for fine-grained vision language tasks (region caption, referring masks)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.789, + 0.483, + 0.925 + ], + "angle": 0, + "content": "Plain Baseline. To fill this gap, we first construct a plain single transformer baseline, motivated by the previous ViT-based MLLMs [27, 72]. We start it with a pre-trained encoder-free MLLM. For segmentation tasks, we modify previous mask generation methods into the single transformer. First, we reshape the hidden states of the last transformer layer of vision tokens \\(\\mathcal{V} \\in \\mathbb{R}^{N \\times C}\\) into image features \\(\\mathcal{F} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}\\). \\(N\\) represents the number of vision tokens, \\(C\\) denotes the channel size, \\(H\\) and \\(W\\) indicate" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.115, + 0.905, + 0.342 + ], + "angle": 0, + "content": "the height and width of the image, \\(S\\) stands for the down-sampling stride. Then, the image features are then crossmultiplied with the hidden states of the predicted segmentation token \\(\\mathcal{Q} \\in \\mathbb{R}^{K \\times C}\\) to generate the segmentation masks \\(\\mathcal{M} \\in \\mathbb{R}^{K \\times \\frac{H}{S} \\times \\frac{W}{S}}\\). 
\\(K\\) signifies the number of predicted segmentation tokens, following previous works [27, 51]. For visual prompt understanding, we employ a pooling-based method [74] to derive object representations \\(\\mathcal{O} \\in \\mathbb{R}^{M \\times C}\\) from image patch embeddings \\(\\mathcal{P} \\in \\mathbb{R}^{\\frac{H}{P} \\times \\frac{W}{P} \\times C}\\). These object embeddings are fed into the single transformer to represent the corresponding objects. \\(M\\) represents the number of visual prompts, and \\(P\\) denotes the patch size. For segmentation tasks, we adopt extra mask loss. Otherwise, we adopt the same text loss for VQA tasks and visual prompt understanding tasks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.343, + 0.907, + 0.525 + ], + "angle": 0, + "content": "Limitation. The plain baseline demonstrates a certain level of pixel-text alignment capability since both segmentation token and visual prompt token are jointly learned with vision and language tokens. However, the plain baseline exhibits several significant shortcomings: 1) The segmentation mask quality is poor due to the large feature down-sampling stride (16 or 32), even when using simple pixel shuffle or bilinear interpolation for up-sampling. 2) The single transformer struggles to comprehend the referential target of object representation, as the object representation is summarized from image patch embeddings with poor semantic information." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.538, + 0.702, + 0.552 + ], + "angle": 0, + "content": "3.2. Pixel-SAIL Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.561, + 0.905, + 0.802 + ], + "angle": 0, + "content": "Given the substantial shortcomings, the performance of plain baseline in fine-grained pixel understanding tasks falls significantly, compared to vision-expert competitors (Sec.4). To solve these challenges, we have implemented three key enhancements to the baseline architecture. 
First, we integrate a learnable up-sampling module to fully exploit the segmentation capabilities of the single transformer architecture. Second, we develop an innovative visual prompt injection mechanism that facilitates effective interpretation of visual prompt inputs. Our method enables early-stage fusion between vision tokens and visual prompt embeddings. Finally, we introduce a dense feature distillation strategy that significantly improves the model's capacity for extracting fine-grained visual features. These improvements collectively address the shortcomings of the plain baseline while maintaining its architectural simplicity." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.804, + 0.905, + 0.925 + ], + "angle": 0, + "content": "Learnable Up-sampling Module. Inspired by [35], we also incorporate a simple learnable up-sampling model \\(\\mathcal{U}\\) to generate the high-resolution features \\(F_{h} \\in \\mathbb{R}^{\\frac{H}{4} \\times \\frac{W}{4} \\times C}\\) essential for pixel-level grounding. The up-sampling module comprises multiple up-sampling blocks, each consisting of a transposed 2D convolution followed by a depth-wise convolution. It effectively upscales the low-resolution features \\(F_{l} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}\\), derived from resized vision tokens," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.112, + 0.874, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.331, + 0.908, + 0.376 + ], + "angle": 0, + "content": "Figure 2. The architecture of our proposed plain baseline and Pixel-SAIL. Pixel-SAIL is as simple and elegant as the plain baseline but demonstrates significantly improved performance. 
ViP-LLaVA [4] overlays the visual prompts directly onto the image canvas and fine-tunes the LLaVA on a specific visual prompt dataset, while Osprey [74] explores pixel-wise mask regions into language instructions.
We perform distillation by leveraging mask features generated by Mask2Former's [12] pixel decoder on the upsampled mask features \\( F_{h} \\in \\mathbb{R}^{\\frac{H}{4} \\times \\frac{W}{4} \\times C} \\) and utilizing features produced by SAM2's [53] encoder
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.88, + 0.906, + 0.925 + ], + "angle": 0, + "content": "The first aspect is detailed object caption. Previous works [6, 34] have emphasized more detailed image captions, demonstrating that comprehensive captions signifi" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.089, + 0.115, + 0.482, + 0.282 + ], + "angle": 0, + "content": "cantly enhance model performance. However, current object caption datasets such as Osprey-724k [74] and evaluation benchmarks like Refcocog provide only cursory object captions. To address this limitation, we leverage SOTA models InternVL2.5-78B [11] and Qwen2.5VL-72B [2] to generate detailed object captions. These detailed object captions are then meticulously screened and refined through manual review, ultimately yielding 500 precise, nuanced object captions to serve as a robust evaluation benchmark. METEOR [3] serves as the evaluation metric for the detailed object caption task." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.284, + 0.485, + 0.569 + ], + "angle": 0, + "content": "The second aspect is the assessment of visual-prompt understanding ability in multiple-choice format. Although captioning tasks can accurately reflect a model's visual prompt understanding ability, precise and fair evaluation is difficult. Rule-based metrics such as CIDEr [57] and METEOR [3] are affected by response length, format, and ground-truth quality, while using models as evaluators inevitably introduces model bias. Therefore, a fair and quantitative visual-prompt understanding benchmark is necessary. Inspired by MMBench [43] and MME [20], we manually annotated 500 multiple-choice questions based on detailed object captions, covering the examination of models' understanding of referenced objects' appearance, attributes, uses, and relationships with surrounding objects. 
Similar to the RefCOCO series datasets, we select cIoU and gIoU as the evaluation metric for the V-T RES task.
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.91, + 0.482, + 0.924 + ], + "angle": 0, + "content": "For segmentation-related data, we first use Ref-" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.115, + 0.905, + 0.252 + ], + "angle": 0, + "content": "COCO+/g [25, 70] and COCO [37] semantic segmentation data used in LISA [27], the Grandf dataset (214k samples) used in GLaMM [51], and MUSE data (246k samples) used in PixelLM [54]. We also use recent Pixel2Cap [69] data (comprising 20k images) and organized it into the referring segmentation format. Finally, we further add COCO [37] panoptic segmentation data and structured it as: \"Question: Please segment the {class name} in instance mode. Answer: {class name}-1 [SEG], ..., {class name}-n [SEG].\"" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.252, + 0.905, + 0.401 + ], + "angle": 0, + "content": "For visual prompt understanding, we employ two public datasets: Osprey-724k [74] and Pixel2Cap [69]. Additionally, we reformat the COCO dataset into a question-answer structure specifically designed to query object categories. To enhance the model's capability for fine-grained object description, we prompt the InternVL2.5-78B [11] model to generate approximately 300k detailed object captions derived from 10k SA-1B [26] images. Lastly, to maintain the instruction following ability, we also integrate the LLaVA1.5 [40] 665k dataset into our training data." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.402, + 0.905, + 0.463 + ], + "angle": 0, + "content": "Training. We combine all the aforementioned data for cotraining. 
The loss function consists of the next token prediction loss \\(\\mathcal{L}_{ntp}\\), the segmentation loss \\(\\mathcal{L}_{seg}\\), and the distillation loss \\(\\mathcal{L}_{distill}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.47, + 0.905, + 0.487 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {n t p} + \\mathcal {L} _ {s e g} + \\alpha \\mathcal {L} _ {\\text {d i s t i l l}}, \\quad \\mathcal {L} _ {s e g} = \\lambda \\mathcal {L} _ {c e} + \\beta \\mathcal {L} _ {\\text {s e g}}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.494, + 0.799, + 0.509 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is set to 0.5, \\(\\lambda\\) to 2.0 and \\(\\beta\\) to 0.5." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.522, + 0.64, + 0.539 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.547, + 0.907, + 0.925 + ], + "angle": 0, + "content": "Implementation Details. We extensively evaluate our meta-architecture using two open-source encoder-free multimodal large language models: SOLO [8] and EVEv2 [16]. For SOLO, following [28], we modify the attention mechanism between vision tokens from causal attention to full attention and conduct supervised fine-tuning on the LLaVA1.5 665k dataset. For SOLO, we modify the attention mechanism between vision tokens from causal attention to full attention and replace the LLM with Qwen2.5 [66] 0.5B and 3B, respectively. For EVEv2, we retain its original architecture and weights without any modifications. We build Pixel-SAIL 0.5B and 3B based on our modified SOLO baseline, and 7B on EVEv2. When training Pixel-SAIL based on SOLO, we maintain the original resolution of input images. For images with a long side exceeding 1024, we preserve the aspect ratio and resize the long side to 1024. 
When training Pixel-SAIL based on EVEv2, we resize the images to the closest to \\(800^2\\) pixels to reduce training costs, which differs from the original setting of \\(1600^2\\). The training process is conducted on 32 A100 (80GB) GPUs using the AdamW [44] optimizer with a cosine decay learning rate scheduler. We set the initial learning rate to 4e-5, the warm-up ratio to 0.03, and the batch size to 256. The training duration for the 0.5B and 3B models is 12 hours and 24 hours, respectively." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.113, + 0.907, + 0.14 + ], + "angle": 0, + "content": "Table 1. Performance on referring segmentation benchmarks. The evaluation metric is cIoU. \"ft\" denotes fine-tuning on the specific dataset." + }, + { + "type": "table", + "bbox": [ + 0.15, + 0.143, + 0.846, + 0.479 + ], + "angle": 0, + "content": "
MethodLLM SizeRefCOCO+RefCOCOgRefCOCOgRefCOCO
valtestAtestBval(U)test(U)valtestAtestBvaltestAtestB
Referring Segmentation Specialist Without MLLM
VLT [17]-56.361.050.155.057.767.570.565.252.562.250.5
CRIS [59]-62.368.153.759.960.470.573.266.155.363.851.0
LAVT [68]-62.168.455.161.262.172.775.868.857.665.355.0
PolyFormer-L [42]-69.374.661.969.270.276.078.373.3---
ReLA [39]-66.071.057.765.066.073.876.570.256.459.058.4
MLLMs With Vision Expert
LISA (ft) [27]7B65.170.858.167.970.674.979.172.3---
PixelLM [54]7B66.371.758.369.370.573.076.568.2---
GSVA (ft) [63]7B64.567.758.671.172.076.477.472.861.769.260.3
GroundHog [81]7B70.575.064.974.174.678.579.975.766.7--
GlaMM (ft) [51]7B72.678.764.674.274.979.583.276.9---
SAM4MLLM [9]7B73.577.865.874.575.679.682.876.166.370.163.2
LaSagnA [60]7B66.470.660.170.671.976.878.773.838.150.442.1
OMG-LLaVA (ft) [80]7B69.173.163.072.972.978.080.374.1---
F-LLM [62]7B65.875.258.570.171.775.879.572.4---
Sa2VA [72]4B74.3--76.7-80.4-----
MLLMs Without Vision Expert
Pixel-SAIL0.5B70.875.865.475.476.777.980.575.963.971.563.6
Pixel-SAIL (ft)0.5B73.077.068.075.676.179.181.777.068.074.066.8
Pixel-SAIL3B75.779.772.078.780.480.882.679.067.774.667.1
Pixel-SAIL (ft)3B76.279.771.278.579.481.883.478.872.177.170.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.095, + 0.492, + 0.479, + 0.506 + ], + "angle": 0, + "content": "Table 2. Region caption performance on RefCOCOg dataset." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.509, + 0.472, + 0.551 + ], + "angle": 0, + "content": "
Method SizePixel-SAIL 0.5BPixel-SAIL 3BSa2VA 4BOMG-LLaVA 7BOsprey 7BGLaMM 7B
METEOR16.017.617.315.316.616.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.558, + 0.483, + 0.599 + ], + "angle": 0, + "content": "Table 3. The performance on our PerBench. Due to the lack of visual prompt understanding capability, LISA scores 0 on all tasks." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.601, + 0.472, + 0.704 + ], + "angle": 0, + "content": "
ModelSizeDetailed Caption METEORMCQ AccV-T RESOverall Score
cIoUgIoU
LISA [27]7B00000
Osprey [74]7B13.40.12008.5
GLaMM [51]7B12.60.1424.314.615.3
Sa2VA [72]4B19.20.7131.921.939.0
Pixel-SAIL0.5B21.40.6929.719.838.4
Pixel-SAIL3B24.20.7433.423.542.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.709, + 0.483, + 0.751 + ], + "angle": 0, + "content": "Table 4. Performance on the VQA benchmarks. \\(\\star\\) refers to the use of an \\(800^{2}\\) resolution, which differs from the \\(1600^{2}\\) resolution in the pre-trained model." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.755, + 0.472, + 0.848 + ], + "angle": 0, + "content": "
ModelLLM SizeMMEMMBenchSEEDMMStar
SOLO0.5B523.2/222.513.845.526.2
SOLO3B1155.7/257.553.465.440.3
EVEv2*7B1128.0/240.760.354.244.9
Pixel-SAIL0.5B564.1/150.731.852.226.3
Pixel-SAIL3B1187.3/242.956.366.140.1
Pixel-SAIL*7B1081.0/260.458.964.744.3
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.864, + 0.485, + 0.926 + ], + "angle": 0, + "content": "Evaluation Setup. For visual prompt understanding and general image QA tasks, we adhere to the same setting as the base MLLM. In the case of segmentation-related tasks, if the model fails to predict a [SEG] token, we compel it" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.504, + 0.907, + 0.534 + ], + "angle": 0, + "content": "to produce a [SEG] token to ensure the generation of the segmentation result." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.537, + 0.655, + 0.552 + ], + "angle": 0, + "content": "4.1. Main Results" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.56, + 0.907, + 0.832 + ], + "angle": 0, + "content": "Results on Referring Segmentation Benchmarks. We compare Pixel-SAIL with other pixel-grounded MLLMs and segmentation specialists on the RefCOCO+ [70], RefCOCOg [70], RefCOCO [25], and gRefCOCO [39] datasets. The comparison results are shown in Tab. 1. Pixel-SAIL 0.5B achieved 70.8, 75.4, and 77.9 cIoU on the validation splits of RefCOCO+, RefCOCOg, and RefCOCO, outperforming all segmentation specialists with comparable model sizes while also maintaining image conversation capabilities. Compared to the classical SAM-based MLLM competitor LISA-7B [27], Pixel-SAIL 0.5B surpassed it by 4.2, 7.9, and 7.8 cIoU on RefCOCO, RefCOCO+, and RefCOCOg respectively, despite having a much smaller model size (0.5B vs. 7B). On the more complex gRefCOCO dataset that includes multi-object segmentation, Pixel-SAIL 0.5B outperformed the carefully designed GSVA-7B [63] by 6.3, 4.8, and 6.5 cIoU on validation, testA, and testB splits respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.833, + 0.909, + 0.926 + ], + "angle": 0, + "content": "When scaling the model to 3B, Pixel-SAIL achieved 75.7, 78.7, 80.8, and 67.7 cIoU on RefCOCO+, RefCOCOg, RefCOCO, and gRefCOCO datasets respectively, surpassing all larger-sized (7B) MLLMs assisted with vision experts. Pixel-SAIL-3B even outperformed the SOTA Sa2VA-4B [72] (which uses the powerful InternVL2-" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.114, + 0.323, + 0.156 + ], + "angle": 0, + "content": "Table 5. Ablation study on the components of Pixel-SAIL. \"RC\" denotes region caption on RefCOCOg dataset." + }, + { + "type": "table", + "bbox": [ + 0.098, + 0.16, + 0.318, + 0.242 + ], + "angle": 0, + "content": "
ModelRefCOCO+/gRC
Plain Baseline64.5/57.3/60.11.0
+ Upsampling69.7/62.5/65.30.9
+ Training Data76.2/69.6/73.81.4
+ VP Injection77.4/70.4/75.216.1
+ Distillation77.9/70.8/75.416.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.327, + 0.117, + 0.524, + 0.173 + ], + "angle": 0, + "content": "Table 6. Ablation study on Base MLLM. The training data only includes LLaVA-665k and Ref-COCO+/g." + }, + { + "type": "table", + "bbox": [ + 0.332, + 0.178, + 0.522, + 0.24 + ], + "angle": 0, + "content": "
MLLMSizeRefCOCO/+/g
SOLO0.5B69.7/62.5/65.3
SOLO3B73.2/66.4/69.1
EVEv27B74.9/68.7/71.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.526, + 0.121, + 0.905, + 0.15 + ], + "angle": 0, + "content": "Table 7. Ablation on the train- Table 8. Ablation study on ing data. \"RC\" denotes region the distillation strategy." + }, + { + "type": "table_caption", + "bbox": [ + 0.526, + 0.151, + 0.712, + 0.164 + ], + "angle": 0, + "content": "caption on RefCOCOg dataset." + }, + { + "type": "table", + "bbox": [ + 0.53, + 0.175, + 0.718, + 0.233 + ], + "angle": 0, + "content": "
DataRefCOCO+/gRC
Basic Data69.7/62.5/65.3-
+ Seg Data76.2/69.6/73.8-
+ VP Data77.4/70.4/75.216.1
" + }, + { + "type": "table", + "bbox": [ + 0.727, + 0.158, + 0.9, + 0.234 + ], + "angle": 0, + "content": "
DataRefCOCO+/g
w/o Distill77.5/70.5/75.5
M2F77.7/71.0/75.8
SAM277.8/70.9/75.9
Both78.1/70.8/76.1
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.25, + 0.483, + 0.296 + ], + "angle": 0, + "content": "4B [10] and SAM2-L [53]), achieving performance advantages of 1.4 and 2.0 cIoU on the more challenging RefCOCO+ and RefCOCOg datasets respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.299, + 0.484, + 0.464 + ], + "angle": 0, + "content": "Results on Visual Prompt Understanding Benchmarks. We evaluate the region caption performance on the RefCOCOg dataset, with results shown in Tab. 2. The training dataset of Pixel-SAIL does not include the RefCOCOg region caption dataset, so we directly evaluate its zero-shot performance. Pixel-SAIL-0.5B achieves a METEOR score of 16.0, surpassing OMG-LLaVA 7B by 0.7 points. When scaling the model to 3B, Pixel-SAIL achieves a METEOR score of 17.6, outperforming carefully designed larger models such as Osprey 7B and GLaMM 7B by 1.0 and 1.4 points respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.468, + 0.484, + 0.739 + ], + "angle": 0, + "content": "Results on PerBench. We have benchmarked several popular pixel-grounded MLLMs on our proposed PerBench, with results shown in Tab. 3. LISA [27] scores 0 points across all tasks due to its inability to understand visual prompt inputs. Osprey [74] demonstrates strong object caption capabilities; however, it achieved only 13.4 METEOR in detailed caption tasks and \\(12.0\\%\\) accuracy in MCQ tasks due to limitations from short object caption lengths in its training data and impaired instruction-following ability. GLaMM [51] and Sa2VA [72] both exhibit comprehensive prompt understanding and segmentation capabilities, though GLaMM's weaker instruction-following ability resulted in only \\(14.0\\%\\) accuracy in MCQ tasks. PixelSAIL-0.5B achieves an overall score of 38.4, comparable to Sa2VA-4B despite Pixel-SAIL having a more powerful base MLLM and segmentation expert. 
Results on VQA Benchmarks. We compare the visual question answering performance of Pixel-SAIL with the corresponding base MLLMs on the MME [20], MMBench [43], SEED [29], and MMStar [7] benchmarks, and the results are presented in Tab. 4.
+ "type": "image", + "bbox": [ + 0.751, + 0.338, + 0.901, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.38, + 0.651, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.38, + 0.75, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.751, + 0.38, + 0.836, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.836, + 0.38, + 0.897, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.437, + 0.906, + 0.548 + ], + "angle": 0, + "content": "Figure 4. Visualization results of Pixel-SAIL on diversity tasks. Best view it in color and zoom in. From top to bottom are visual prompt-based object caption, single/multi-object referring segmentation, vision-text referring segmentation, image caption and QA, and visual-prompt based conversation. Visual prompts in the form of points and boxes are converted into mask prompts using SAM [26]. For more visualization results and comparisons with other MLLMs, please refer to the appendix." 
+ }, + { + "type": "image", + "bbox": [ + 0.52, + 0.556, + 0.616, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.604, + 0.616, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.556, + 0.708, + 0.656 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.71, + 0.556, + 0.804, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.806, + 0.556, + 0.9, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.657, + 0.616, + 0.705 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.657, + 0.708, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.657, + 0.804, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.806, + 0.657, + 0.898, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.711, + 0.907, + 0.752 + ], + "angle": 0, + "content": "Figure 5. Image feature visualization results. From left to right are the image feature of the base MLLM, the image feature of Pixel-SAIL, and the mask feature of Pixel-SAIL." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.765, + 0.679, + 0.78 + ], + "angle": 0, + "content": "4.2. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.789, + 0.907, + 0.925 + ], + "angle": 0, + "content": "Effectiveness of Each Component. We conduct comprehensive ablation studies on the proposed components, with results presented in Tab. 5. Our plain baseline, trained with LLaVA-665k and RefCOCO+/g data, achieves only 64.5, 57.3, and 60.1 cIoU on the RefCOCO, RefCOCO+, and RefCOCOg datasets, respectively. Moreover, this baseline completely fails on the visual prompt understanding task, attaining merely 1.0 METEOR on the region caption task. 
Upon incorporating the learnable upsampling mod" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.486, + 0.417 + ], + "angle": 0, + "content": "ule, segmentation quality improves dramatically, with the model reaching 76.2, 69.6, and \\(73.8\\mathrm{cIoU}\\) on RefCOCO, RefCOCO+, and RefCOCOg. However, the model still cannot effectively interpret user-input visual prompts due to insufficient semantic information in the object representation. When we scale up the training data by introducing substantial amounts of segmentation data and visual-prompt understanding data, the model's segmentation capabilities are further enhanced. Despite scaling the training data, the model continues to struggle with visual prompt inputs because of the limited semantic information in the object representation. After implementing our proposed visual prompt injection mechanism, the model demonstrates significant improvements in visual prompt understanding, achieving 16.1 METEOR on the region caption task. Interestingly, we observe that enhanced visual prompt understanding capabilities positively influence referring segmentation performance. Finally, incorporating the distillation strategy further refines the model's detailed segmentation quality." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.42, + 0.485, + 0.662 + ], + "angle": 0, + "content": "Ablation on Various MLLMs. To demonstrate the effectiveness of Pixel-SAIL, we validate across different architectures and sizes, with results shown in Tab. 6. To reduce training costs, we use only LLaVA-665k and RefCOCO \\(+ / \\mathrm{g}\\) data for training and evaluate on the referring segmentation task. When using our modified 0.5B SOLO as the base MLLM, Pixel-SAIL achieves cIoU scores of 69.7, 62.5, and 65.3 on RefCOCO \\(+ / \\mathrm{g}\\). 
When scaling the model size to 3B, Pixel-SAIL's performance improves by 3.5, 3.9, and 3.8 cIoU on RefCOCO \\(+ / \\mathrm{g}\\). When using EVEv2-7B as the base MLLM, despite the attention between vision tokens changing from full attention to causal attention and the architecture transitioning to an MOE architecture, Pixel-SAIL achieves cIoU scores of 77.4, 70.4, and 75.2 on RefCOCO \\(+ / \\mathrm{g}\\), demonstrating that performance consistently increases with model scaling." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.665, + 0.485, + 0.877 + ], + "angle": 0, + "content": "Ablation on Data Scaling. Data plays a crucial role in the performance of Pixel-SAIL. As shown in Tab. 7, we conduct comprehensive ablation studies on the training data to evaluate its impact. When trained solely with basic data (including LLaVA-665k and RefCOCO+/g datasets), Pixel-SAIL achieves 69.7, 62.5, and 65.3 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively. Upon scaling the segmentation-related data, Pixel-SAIL demonstrates significant performance improvements of 6.5, 7.1, and 8.5 cIoU on these datasets. Furthermore, incorporating visual prompt data for mixed training not only enhances the model's visual prompt understanding capabilities but also yields additional performance gains of 1.2, 0.8, and 1.4 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.88, + 0.485, + 0.926 + ], + "angle": 0, + "content": "Ablation on Distillation Strategy. Distillation is a highly effective method for infusing knowledge into Pixel-SAIL. We conduct ablation studies on the distillation strategy, and" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.115, + 0.908, + 0.298 + ], + "angle": 0, + "content": "the results are presented in Tab. 8. We use the average cIoU across all splits as the evaluation metric. 
When only Mask2Former [12] is employed to distill high-resolution mask features, Pixel-SAIL achieves performance gains of 0.2, 0.5, and 0.3 on RefCOCO \\(+ / \\mathrm{g}\\). When SAM2 [53] is used to distill low-resolution image features, Pixel-SAIL obtains performance improvements of 0.3, 0.4, and 0.4 on RefCOCO \\(+ / \\mathrm{g}\\). When both teacher models are utilized collaboratively, performance gains of 0.6, 0.3, and 0.5 are achieved. Additionally, the extra computational cost introduced by the distillation strategy is minimal, increasing the training time by only about \\(5\\%\\) for Pixel-SAIL-0.5B." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.306, + 0.722, + 0.321 + ], + "angle": 0, + "content": "4.3. Visualization Analysis" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.327, + 0.907, + 0.388 + ], + "angle": 0, + "content": "Visual Comparison. In Fig. 4, we showcase Pixel-SAIL's visualization results on diverse tasks. Pixel-SAIL flexibly interprets both visual prompts and text instruction inputs, responding with text and segmentation masks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.388, + 0.909, + 0.614 + ], + "angle": 0, + "content": "Visual Affinity Map Analysis. We use PCA dimensionality reduction algorithm to visualize vision features, with results shown in Fig. 5. Our Pixel-SAIL's image features (3rd column) are denser and more diverse compared to the base MLLM's image features (2nd column). Pixel-SAIL's mask features, after the upsampling module, are denser and have better segmentation edges. Interestingly, Pixel-SAIL's image features (more focused on understanding, combining factors such as categories, colors, positions, etc.) exhibit different characteristics from mask features (more focused on perception, categories, and instances). 
As seen in the second row's third and fourth columns, the cars on the left and right have relatively distant feature representations in the image features, while they are very close in the mask features." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.627, + 0.634, + 0.643 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.653, + 0.909, + 0.834 + ], + "angle": 0, + "content": "We explore the simplest architecture for pixel-grounded understanding tasks. In particular, we present Pixel-SAIL, which extends current SAIL-like MLLM for fine-grained understanding with three technical improvements (learnable upsampling module, new visual prompt encoding, and segmentor feature distillation). For the first time, our work proves that even without extra visual experts (visual encoder, segmentation models), one single transformer can still achieve stronger performance on four public referring segmentation benchmarks. We further introduce a more challenging benchmark, Perbench, to promote the development of pixel-MLLM community." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.834, + 0.909, + 0.925 + ], + "angle": 0, + "content": "Limitation and Future Work. Our work provides the simplest solution for pixel-grounded tasks. However, one limitation is that we only adopt 1.7M data for co-training. We will further explore Pixel-SAIL on more data (for example, billion-level masks along with visual prompts [26]) for cotraining." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.114, + 0.188, + 0.129 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.139, + 0.482, + 0.208 + ], + "angle": 0, + "content": "[1] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. 
Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.21, + 0.482, + 0.264 + ], + "angle": 0, + "content": "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.267, + 0.482, + 0.308 + ], + "angle": 0, + "content": "[3] Satanjeev Banerjee and Alon Lavie. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. ACL, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.311, + 0.482, + 0.365 + ], + "angle": 0, + "content": "[4] Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, and Yong Jae Lee. Making large multimodal models understand arbitrary visual prompts. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.368, + 0.482, + 0.422 + ], + "angle": 0, + "content": "[5] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internl m2 technical report. arXiv preprint arXiv:2403.17297, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.425, + 0.482, + 0.48 + ], + "angle": 0, + "content": "[6] Lin Chen, Jisong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. arXiv preprint arXiv:2311.12793, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.482, + 0.482, + 0.55 + ], + "angle": 0, + "content": "[7] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.553, + 0.482, + 0.594 + ], + "angle": 0, + "content": "[8] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. TMLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.597, + 0.482, + 0.651 + ], + "angle": 0, + "content": "[9] Yi-Chia Chen, Wei-Hua Li, Cheng Sun, Yu-Chiang Frank Wang, and Chu-Song Chen. Sam4mllm: Enhance multimodal large language model for referring expression segmentation. ECCV, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.654, + 0.482, + 0.723 + ], + "angle": 0, + "content": "[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.725, + 0.482, + 0.793 + ], + "angle": 0, + "content": "[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.796, + 0.482, + 0.849 + ], + "angle": 0, + "content": "[12] Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.853, + 0.482, + 0.895 + ], + "angle": 0, + "content": "[13] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.897, + 0.482, + 0.924 + ], + "angle": 0, + "content": "[14] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.139, + 0.482, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.548, + 0.116, + 0.905, + 0.142 + ], + "angle": 0, + "content": "vision-language models. arXiv preprint arXiv:2406.11832, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.146, + 0.905, + 0.186 + ], + "angle": 0, + "content": "[15] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. NeurIPS, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.188, + 0.905, + 0.243 + ], + "angle": 0, + "content": "[16] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Eve2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.246, + 0.904, + 0.285 + ], + "angle": 0, + "content": "[17] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.288, + 0.905, + 0.342 + ], + "angle": 0, + "content": "[18] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In ACMMM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.344, + 0.904, + 0.385 + ], + "angle": 0, + "content": "[19] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. In CVPR, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.387, + 0.904, + 0.455 + ], + "angle": 0, + "content": "[20] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, Yunsheng Wu, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.458, + 0.904, + 0.498 + ], + "angle": 0, + "content": "[21] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.501, + 0.905, + 0.568 + ], + "angle": 0, + "content": "[22] Louis Martin Hugo Touvron, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, and et al. Llama 2: Open foundation and fine-tuned chat models. arXiv:2307.09288, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.571, + 0.905, + 0.624 + ], + "angle": 0, + "content": "[23] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.627, + 0.905, + 0.682 + ], + "angle": 0, + "content": "[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.684, + 0.905, + 0.725 + ], + "angle": 0, + "content": "[25] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.727, + 0.905, + 0.781 + ], + "angle": 0, + "content": "[26] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.784, + 0.905, + 0.824 + ], + "angle": 0, + "content": "[27] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.826, + 0.905, + 0.881 + ], + "angle": 0, + "content": "[28] Weixian Lei, Jiacong Wang, Haochen Wang, Xiangtai Li, Jun Hao Liew, Jiashi Feng, and Zilong Huang. The scalability of simplicity: Empirical analysis of vision-language learning with a single transformer. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.883, + 0.905, + 0.924 + ], + "angle": 0, + "content": "[29] Bohao Li, Yuying Ge, Yixiao Ge, Guangzhi Wang, Rui Wang, Ruimao Zhang, and Ying Shan. Seed-bench: Benchmarking multimodal large language models. In CVPR, 2024." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.116, + 0.905, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.959 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.171 + ], + "angle": 0, + "content": "[30] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.173, + 0.482, + 0.227 + ], + "angle": 0, + "content": "[31] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 
Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.229, + 0.482, + 0.284 + ], + "angle": 0, + "content": "[32] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.286, + 0.482, + 0.342 + ], + "angle": 0, + "content": "[33] Xiangtai Li, Haobo Yuan, Wei Li, Henghui Ding, Size Wu, Wenwei Zhang, Yining Li, Kai Chen, and Chen Change Loy. Omg-seg: Is one model good enough for all segmentation? In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.343, + 0.482, + 0.398 + ], + "angle": 0, + "content": "[34] Xiaotong Li, Fan Zhang, Haiwen Diao, Yueze Wang, Xinlong Wang, and Ling-Yu Duan. Densefusion-1m: Merging vision experts for comprehensive multimodal perception. arXiv preprint arXiv:2407.08303, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.4, + 0.482, + 0.44 + ], + "angle": 0, + "content": "[35] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. ECCV, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.442, + 0.482, + 0.497 + ], + "angle": 0, + "content": "[36] Chen Liang, Wenguan Wang, Tianfei Zhou, Jiaxu Miao, Yawei Luo, and Yi Yang. Local-global context aware transformer for language-guided video segmentation. arXiv preprint arXiv:2203.09773, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.499, + 0.482, + 0.553 + ], + "angle": 0, + "content": "[37] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.555, + 0.482, + 0.624 + ], + "angle": 0, + "content": "[38] Weifeng Lin, Xinyu Wei, Ruichuan An, Peng Gao, Bocheng Zou, Yulin Luo, Siyuan Huang, Shanghang Zhang, and Hongsheng Li. Draw-and-understand: Leveraging visual prompts to enable mllms to comprehend what you want. arXiv preprint arXiv:2403.20271, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.626, + 0.482, + 0.653 + ], + "angle": 0, + "content": "[39] Chang Liu, Henghui Ding, and Xudong Jiang. GRES: Generalized referring expression segmentation. In CVPR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.655, + 0.482, + 0.682 + ], + "angle": 0, + "content": "[40] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.684, + 0.482, + 0.724 + ], + "angle": 0, + "content": "[41] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.726, + 0.482, + 0.781 + ], + "angle": 0, + "content": "[42] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. *Polyformer: Referring image segmentation as sequential polygon generation*. *CVPR*, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.783, + 0.482, + 0.838 + ], + "angle": 0, + "content": "[43] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In ECCV, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.839, + 0.482, + 0.867 + ], + "angle": 0, + "content": "[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.868, + 0.482, + 0.896 + ], + "angle": 0, + "content": "[45] Timo Lüddecke and Alexander Ecker. Image segmentation using text and image prompts. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.897, + 0.482, + 0.924 + ], + "angle": 0, + "content": "[46] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.116, + 0.905, + 0.144 + ], + "angle": 0, + "content": "boundaries of monolithic multimodal large language models with endogenous visual pre-training. CVPR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.146, + 0.905, + 0.187 + ], + "angle": 0, + "content": "[47] Chuofan Ma, Yi Jiang, Jiannan Wu, Zehuan Yuan, and Xiaojuan Qi. Groma: Localized visual tokenization for grounding multimodal large language models. In ECCV, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.189, + 0.905, + 0.257 + ], + "angle": 0, + "content": "[48] Shehan Munasinghe, Hanan Gani, Wenqi Zhu, Jiale Cao, Eric Xing, Fahad Shahbaz Khan, and Salman Khan. Videoglamm: A large multimodal model for pixel-level visual grounding in videos. arXiv preprint arXiv:2411.04923, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.259, + 0.905, + 0.314 + ], + "angle": 0, + "content": "[49] Lu Qi, Yi-Wen Chen, Lehan Yang, Tiancheng Shen, Xiangtai Li, Weidong Guo, Yu Xu, and Ming-Hsuan Yang. Generalizable entity grounding via assistance of large language model. arXiv preprint arXiv:2402.02555, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.316, + 0.905, + 0.384 + ], + "angle": 0, + "content": "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 
Learning transferable visual models from natural language supervision. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.386, + 0.905, + 0.455 + ], + "angle": 0, + "content": "[51] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M. Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S. Khan. Glamm: Pixel grounding large multimodal model. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.457, + 0.905, + 0.512 + ], + "angle": 0, + "content": "[52] Jeff Rasley, Samyam Rajbhandari, Olatunj Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In SIGKDD, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.514, + 0.905, + 0.582 + ], + "angle": 0, + "content": "[53] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.584, + 0.905, + 0.627 + ], + "angle": 0, + "content": "[54] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.629, + 0.905, + 0.71 + ], + "angle": 0, + "content": "[55] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.712, + 0.905, + 0.795 + ], + "angle": 0, + "content": "[56] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and efficient foundation language models. arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.797, + 0.905, + 0.837 + ], + "angle": 0, + "content": "[57] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. CVPR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.839, + 0.905, + 0.881 + ], + "angle": 0, + "content": "[58] Jiaqi Wang, Pan Zhang, Tao Chu, Yuhang Cao, Yujie Zhou, Tong Wu, Bin Wang, Conghui He, and Dahua Lin. V3det: Vast vocabulary visual detection dataset. In ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.883, + 0.905, + 0.924 + ], + "angle": 0, + "content": "[59] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.116, + 0.905, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.158 + ], + "angle": 0, + "content": "[60] Cong Wei, Haoxian Tan, Yujie Zhong, Yujiu Yang, and Lin Ma. LaSagnA: Language-based segmentation assistant for complex queries. arXiv preprint arXiv:2404.08506, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.16, + 0.484, + 0.215 + ], + "angle": 0, + "content": "[61] Jianzong Wu, Xiangtai Li, Shilin Xu, Haobo Yuan, Henghui Ding, Yibo Yang, Xia Li, Jiangning Zhang, Yunhai Tong, Xudong Jiang, Bernard Ghanem, and Dacheng Tao. Towards open vocabulary learning: A survey. arXiv pre-print, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.217, + 0.482, + 0.257 + ], + "angle": 0, + "content": "[62] Size Wu, Sheng Jin, Wenwei Zhang, Lumin Xu, Wentao Liu, Wei Li, and Chen Change Loy. F-lmm: Grounding frozen large multimodal models. CVPR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.26, + 0.482, + 0.302 + ], + "angle": 0, + "content": "[63] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.304, + 0.482, + 0.357 + ], + "angle": 0, + "content": "[64] Hu Xu, Saining Xie, Xiaoqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. arXiv preprint arXiv:2309.16671, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.36, + 0.482, + 0.567 + ], + "angle": 0, + "content": "[65] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.569, + 0.482, + 0.624 + ], + "angle": 0, + "content": "[66] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.626, + 0.482, + 0.68 + ], + "angle": 0, + "content": "[67] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.683, + 0.482, + 0.737 + ], + "angle": 0, + "content": "[68] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.74, + 0.482, + 0.794 + ], + "angle": 0, + "content": "[69] Zuyao You, Junke Wang, Lingyu Kong, Bo He, and Zuxuan Wu. 
Pix2cap-coco: Advancing visual comprehension via pixel-level captioning. arXiv preprint arXiv:2501.13893, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.798, + 0.482, + 0.838 + ], + "angle": 0, + "content": "[70] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In ECCV, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.841, + 0.482, + 0.895 + ], + "angle": 0, + "content": "[71] Haobo Yuan, Xiangtai Li, Chong Zhou, Yining Li, Kai Chen, and Chen Change Loy. Open-vocabulary sam: Segment and recognize twenty-thousand classes interactively. arXiv preprint, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.897, + 0.482, + 0.925 + ], + "angle": 0, + "content": "[72] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.116, + 0.484, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.116, + 0.905, + 0.158 + ], + "angle": 0, + "content": "Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.16, + 0.905, + 0.214 + ], + "angle": 0, + "content": "[73] Haobo Yuan, Tao Zhang, Xiangtai Li, Lu Qi, Zilong Huang, Shilin Xu, Jiashi Feng, and Ming-Hsuan Yang. 4th pvuw mevis 3rd place report: Sa2va. arXiv preprint arXiv:2504.00476, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.217, + 0.905, + 0.257 + ], + "angle": 0, + "content": "[74] Yuqian Yuan, Wentong Li, Jian Liu, Dongqi Tang, Xinjie Luo, Chi Qin, Lei Zhang, and Jianke Zhu. Osprey: Pixel understanding with visual instruction tuning. In CVPR, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.259, + 0.905, + 0.299 + ], + "angle": 0, + "content": "[75] Alireza Zareian, Kevin Dela Rosa, Derek Hao Hu, and Shih-Fu Chang. Open-vocabulary object detection using captions. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.301, + 0.905, + 0.342 + ], + "angle": 0, + "content": "[76] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.344, + 0.905, + 0.455 + ], + "angle": 0, + "content": "[77] Pan Zhang, Xiaoyi Dong, Bin Wang, Yuhang Cao, Chao Xu, Linke Ouyang, Zhiyuan Zhao, Shuangrui Ding, Songyang Zhang, Haodong Duan, Wenwei Zhang, Hang Yan, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer: A vision-language large model for advanced text-image comprehension and composition. arXiv preprint arXiv:2309.15112, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.457, + 0.905, + 0.497 + ], + "angle": 0, + "content": "[78] Tao Zhang, Xingye Tian, Yu Wu, Shunping Ji, Xuebo Wang, Yuan Zhang, and Pengfei Wan. DVIS: Decoupled video instance segmentation framework. In ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.499, + 0.905, + 0.567 + ], + "angle": 0, + "content": "[79] Tao Zhang, Xingye Tian, Yikang Zhou, Shunping Ji, Xuebo Wang, Xin Tao, Yuan Zhang, Pengfei Wan, Zhongyuan Wang, and Yu Wu. Dvis++: Improved decoupled framework for universal video segmentation. arXiv preprint arXiv:2312.13305, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.569, + 0.905, + 0.624 + ], + "angle": 0, + "content": "[80] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Change Loy Chen, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. In NeurIPS, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.626, + 0.905, + 0.667 + ], + "angle": 0, + "content": "[81] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.669, + 0.905, + 0.737 + ], + "angle": 0, + "content": "[82] Yikang Zhou, Tao Zhang, Shilin Xu, Shihao Chen, Qianyu Zhou, Yunhai Tong, Shunping Ji, Jiangning Zhang, Xiangtai Li, and Lu Qi. Are they the same? exploring visual correspondence shortcomings of multimodal llms. arXiv preprint arXiv:2501.04670, 2025." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.116, + 0.905, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.155, + 0.109, + 0.844, + 0.132 + ], + "angle": 0, + "content": "Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding" + }, + { + "type": "text", + "bbox": [ + 0.383, + 0.143, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.181, + 0.483, + 0.271 + ], + "angle": 0, + "content": "We first present more details on training and testing of our Pixel-SAIL in Sec. 6. Then, we present the detailed benchmark building process, in Sec. 7 and more challenging examples in PerBench in Sec. 8. Next, we present more comparison with current state-of-the-art pixel-grounded MLLMs, in Sec. 9." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.289, + 0.415, + 0.308 + ], + "angle": 0, + "content": "6. More Detailed Training and Testing" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.316, + 0.483, + 0.647 + ], + "angle": 0, + "content": "Training. We will present more details about the training, including dataset sampling specifications and distillation methodology. 
For the RefCOCO series [25, 70] datasets, we randomly sample 5 referring expressions per image and organize them into a multi-round dialogue format as a single training data point, processing all images for four epochs. For COCO [37] data, we sample 5 categories per image and randomly select either instance mode or semantic mode to structure the responses. In instance mode, objects are arranged by their center points from left to right. We process the COCO dataset for one epoch. For Pixel2Cap [69], our generated detailed object caption data, and Osprey [74] object description data, we randomly sample 1-5 visual prompts per image and randomly incorporate questions about non-existent visual prompts, with responses indicating that these visual prompts do not exist. These object caption datasets are processed for five epochs. For other segmentation-related or visual prompt-related data, we conduct one epoch. For LLaVA-665k, we randomly sample at a 1:1 ratio alongside other data for joint training to ensure that the base MLLM's instruction-following capability remains intact." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.65, + 0.483, + 0.725 + ], + "angle": 0, + "content": "When the length of input tokens (including the length of vision tokens) exceeds 8192, we truncate the excess portion. For the 0.5B model, we use DeepSpeed Zero-1 [52] for training, and for the 3B and 7B models, we use DeepSpeed Zero-2 [52] for training." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.727, + 0.483, + 0.862 + ], + "angle": 0, + "content": "We distill the mask features generated by the Mask2Former [12] pixel decoder and the lowest resolution features generated by the SAM2 [53] image encoder onto the upsampled mask features from Pixel-SAIL and the image features directly reshaped from vision tokens, respectively. We use bilinear interpolation to align spatial dimensions and implement a learnable linear layer to align the channel size. 
The distillation process employs MSE loss with a weight of 0.5." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.864, + 0.483, + 0.924 + ], + "angle": 0, + "content": "Testing. We have elaborated on the testing details of Pixel-SAIL on pixel-grounded benchmarks in the main text. For general image question answering benchmarks, we follow the prompt settings of the base MLLMs and use" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.181, + 0.905, + 0.211 + ], + "angle": 0, + "content": "VLMEvalKit [18] for evaluation, without using additional LLM assistance to identify answers." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.224, + 0.905, + 0.259 + ], + "angle": 0, + "content": "7. More Detailed Process on Benchmarking Building" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.267, + 0.905, + 0.327 + ], + "angle": 0, + "content": "The construction of PerBench combines an automated model-generated pipeline with manual screening, correction, and annotation. The process is divided into three stages." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.328, + 0.905, + 0.478 + ], + "angle": 0, + "content": "The first stage involves annotating detailed object captions. We crop objects and draw visual prompts on the original images to prompt InternVL-2.5 78B [10] and Qwen-VL 2.5 72B [2] to generate detailed captions for the objects. These captions are then cross-validated using Qwen2.5 72B [66]. If all captions are consistent, they are integrated using an LLM; otherwise, the data are discarded. After the model automatically generates the detailed object captions, we manually select and correct 500 of them to form the final 500 detailed object caption data points in the benchmark." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.48, + 0.905, + 0.584 + ], + "angle": 0, + "content": "The second stage focuses on annotating visual-prompt question-answering data in an MCQ (Multiple Choice Question) format. 
In this phase, we manually generate a multiple-choice question for each object caption obtained from the first stage. After completing the annotations, two quality control specialists perform cross-verification to identify and rectify any potential errors." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.585, + 0.905, + 0.718 + ], + "angle": 0, + "content": "The final stage contains the annotation of visual-text referring segmentation data. At this stage, we manually select and annotate object segmentation masks, referring visual prompts, and text from SAM images. During the annotation process, we consider various factors such as positional relationships, event relationships, appearance, size, and more, including cases with both single and multiple visual prompts. Once the annotation is complete, two individuals review it, and correct the errors." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.733, + 0.849, + 0.75 + ], + "angle": 0, + "content": "8. More Challenging Cases in PerBench" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.758, + 0.905, + 0.878 + ], + "angle": 0, + "content": "We present more detailed object caption samples from our PerBench in Fig. 6. The objects are derived from diverse senses and categories, encompassing humans, man-made objects, and natural landscapes. The object captions include basic categories, attributes, purposes, and relationships with surrounding objects. This high-quality benchmark will effectively advance the development of the visual prompt understanding community." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.88, + 0.905, + 0.924 + ], + "angle": 0, + "content": "More referring segmentation samples are illustrated in Fig. 7. 
Our manually annotated samples cover a variety of scenes, such as indoor and outdoor settings, and include" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.948, + 0.504, + 0.959 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.114, + 0.277, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.278, + 0.117, + 0.51, + 0.268 + ], + "angle": 0, + "content": "The object is a person standing behind a wooden podium covered with a blue cloth, addressing an audience outdoors. The podium has an emblem, and the person is dressed in a dark blue jacket with a logo, a scarf, and a white shirt. The audience includes people seated on a stool and a folding chair, with trees, parked cars, and a building with large windows visible in the background." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.114, + 0.617, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.619, + 0.118, + 0.884, + 0.281 + ], + "angle": 0, + "content": "The object is a bronze statue of a young boy with exaggerated features. The statue is dressed in a formal outfit including a suit jacket, vest, and shorts, and it wears flip-flops. The boy holds a small object resembling a ball in his right hand. The statue's whimsical and playful appearance, characterized by exaggerated proportions and a sense of movement, is set on a rock-like structure in an urban street scene with a pedestrian crossing visible in the background." + }, + { + "type": "text", + "bbox": [ + 0.631, + 0.281, + 0.893, + 0.456 + ], + "angle": 0, + "content": "The object is a Smart Fortwo, a compact city car known for its small size and maneuverability. This car features predominantly white with green accents, including green wheels and trim. Designed for urban environments with limited parking space, it is parked on a sidewalk next to a charging station, indicating its electric vehicle status. 
The charging cable is connected, suggesting it is currently being charged. The surrounding area includes a paved road with multiple lanes and a grassy area separated by a curb." + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.285, + 0.253, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.282, + 0.48, + 0.459 + ], + "angle": 0, + "content": "The object is a person dressed in black and blue attire, standing on skis in a snowy outdoor setting, preparing for or participating in a skiing event. This person is wearing a black beanie, a black puffy jacket, black gloves, black pants, and a blue bib with the number \"1\" in red. They also have ski poles and ski boots. In the background, there are other people skiing or standing around, along with parked cars and trees, indicating a recreational or competitive skiing area." + }, + { + "type": "image", + "bbox": [ + 0.482, + 0.315, + 0.63, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.461, + 0.255, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.465, + 0.444, + 0.627 + ], + "angle": 0, + "content": "The object is a stack of white bangles adorned with small, colorful stones. These bangles, part of traditional jewelry, are worn on the arm of a person dressed in vibrant traditional attire, including a yellow scarf with colorful patterns and a floral-patterned garment, complementing the overall colorful and cultural appearance." + }, + { + "type": "image", + "bbox": [ + 0.449, + 0.498, + 0.628, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.63, + 0.461, + 0.892, + 0.621 + ], + "angle": 0, + "content": "The object is a waterfall cascading down a steep, rocky cliff. The waterfall serves as the central focus, with water flowing smoothly from top to bottom. 
The area around the waterfall features rugged, uneven rock surfaces and patches of green vegetation. The cliff is part of a larger mountainous or hilly terrain, with dense foliage at the top. The waterfall's flow and movement create a striking contrast against the dark, textured rocks, highlighting the natural beauty of the scene." + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.661, + 0.307, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.309, + 0.634, + 0.46, + 0.808 + ], + "angle": 0, + "content": "The object is a black shoulder bag with a flap closure. It is made of a synthetic or leather-like material, has a smooth texture and sheen, and is carried over the shoulder of a person walking in a grassy area near a wooden fence and a small white stool, dressed in a black top and pink shorts." + }, + { + "type": "image", + "bbox": [ + 0.469, + 0.652, + 0.656, + 0.747 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.657, + 0.621, + 0.897, + 0.807 + ], + "angle": 0, + "content": "The object is a navigational buoy used in maritime environments. It is a cylindrical structure with a pointed top, primarily white in color, featuring a pink top and a distinctive band. Mounted on a black base, it is situated in a body of water such as a harbor or marina, surrounded by other boats and buoys. Its function is to guide vessels through waterways, aiding navigation with its unique color pattern. This buoy is part of a larger maritime setting, often near populated areas or popular boating destinations." + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.82, + 0.766, + 0.835 + ], + "angle": 0, + "content": "Figure 6. More visualization examples of detailed object captions from our PerBench." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.861, + 0.483, + 0.922 + ], + "angle": 0, + "content": "objects of multiple granularities. 
The referring text encompasses positional relationships, event relationships, and more. This new task is more challenging than current pure text referring segmentation tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.106, + 0.112, + 0.882, + 0.127 + ], + "angle": 0, + "content": "Q: Please segment which object \\( \\langle \\text{vp\\_0} \\rangle \\) is using. A: [SEG] Q: Please segment the \\( \\langle \\text{vp\\_0} \\rangle \\) that is sitting. A: [SEG]" + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.128, + 0.502, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.128, + 0.888, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.253, + 0.473, + 0.267 + ], + "angle": 0, + "content": "Q: Please segment where will arrive. A: [SEG]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.24, + 0.828, + 0.268 + ], + "angle": 0, + "content": "Q: Please segment the object that blocks the sunlight for \\(<\\mathrm{vp\\_0}\\). A: [SEG]" + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.268, + 0.502, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.268, + 0.895, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.373, + 0.438, + 0.399 + ], + "angle": 0, + "content": "Q: Please segment the letter closest to . \nA: [SEG]" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.373, + 0.88, + 0.4 + ], + "angle": 0, + "content": "Q: Please segment the object that is enjoying. 
\nA: [SEG]" + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.401, + 0.501, + 0.504 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.402, + 0.897, + 0.504 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.512, + 0.36, + 0.539 + ], + "angle": 0, + "content": "Q: Please segment the person who is holding \\( \\langle \\text{vp\\_0} \\rangle \\). A: [SEG]" + }, + { + "type": "text", + "bbox": [ + 0.391, + 0.522, + 0.84, + 0.538 + ], + "angle": 0, + "content": "Q: Please segment the tree between \\( \\langle \\text{vp}_0 \\rangle \\) and \\( \\langle \\text{vp}_1 \\rangle \\). A: [SEG]" + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.54, + 0.346, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.375, + 0.546, + 0.892, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.692, + 0.799, + 0.707 + ], + "angle": 0, + "content": "Figure 7. More visualization examples of vision-text referring segmentation from our PerBench." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.732, + 0.483, + 0.765 + ], + "angle": 0, + "content": "9. More Comparison With SOTA Pixel-Grounded MLLM" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.789, + 0.484, + 0.925 + ], + "angle": 0, + "content": "We conduct a qualitative comparative analysis with the SOTA pixel-grounded MLLM, Sa2VA [72], and present the visualization results in Fig. 8. We observe that both Pixel-SAIL and Sa2VA achieve excellent results in most cases. However, Sa2VA performs significantly weaker than Pixel-SAIL in certain scenarios, despite utilizing the much more powerful InternVL2.5 [10] compared to our base encoder-free MLLM [8]. In the left examples, Sa2VA performs notably worse than Pixel-SAIL in multi-object segmentation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.734, + 0.907, + 0.809 + ], + "angle": 0, + "content": "tasks. 
Additionally, in the right example, Sa2VA demonstrates significantly weaker attention to non-core areas of the image, such as edges, compared to Pixel-SAIL, leading to frequent failures in segmenting objects near image boundaries." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.101, + 0.442, + 0.16, + 0.496 + ], + "angle": 0, + "content": "Please segment the two women." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.513, + 0.16, + 0.595 + ], + "angle": 0, + "content": "Please segment all the mans in right side." + }, + { + "type": "image_caption", + "bbox": [ + 0.21, + 0.405, + 0.28, + 0.416 + ], + "angle": 0, + "content": "Pixel-SAIL" + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.418, + 0.331, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.394, + 0.405, + 0.44, + 0.416 + ], + "angle": 0, + "content": "Sa2VA" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.418, + 0.501, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.599, + 0.404, + 0.671, + 0.416 + ], + "angle": 0, + "content": "Pixel-SAIL" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.417, + 0.556, + 0.496 + ], + "angle": 0, + "content": "Please segment the flower." + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.511, + 0.332, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.511, + 0.501, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.51, + 0.555, + 0.605 + ], + "angle": 0, + "content": "Please segment the person sitting on the floor." 
+ }, + { + "type": "image", + "bbox": [ + 0.558, + 0.418, + 0.726, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.726, + 0.418, + 0.896, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.558, + 0.511, + 0.727, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.511, + 0.897, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.304, + 0.615, + 0.694, + 0.63 + ], + "angle": 0, + "content": "Figure 8. Visualization Comparison of Sa2Va and Pixel-SAIL." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.959 + ], + "angle": 0, + "content": "4" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_origin.pdf b/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3bb93547056d0dc8f7372477eab2a8fa667e877c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee20091af6a26d8ac12f74290de50e604d1a6ad3b4d280a402254fd7fd518a51 +size 19392631 diff --git a/data/2025/2504_10xxx/2504.10465/full.md b/data/2025/2504_10xxx/2504.10465/full.md new file mode 100644 index 0000000000000000000000000000000000000000..306e05e9d1acec0e31c1c564a6213130b0ad6518 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/full.md @@ -0,0 +1,474 @@ +# Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding + +Tao Zhang $^{1,2}$ Xiangtai Li $^{1}$ Zilong Huang $^{1}$ Yanwei Li $^{1}$ Weixian Lei $^{1}$ Xueqing Deng $^{1}$ Shihao Chen $^{2}$ Shunping Ji $^{2}$ Jiashi Feng $^{1}$ Bytedance Seed $^{2}$ WHU + +Project Page: https://zhang-tao-whu.github.io/project/pixelsail + 
+![](images/d6b641908f41c0263a174252592424901ed71c8c03ba38b6916ca8c12726c9d7.jpg) +(a), Multi-modal Fusion with extra decoders + +![](images/2ad3d40deefda5121871be7727625f1a4a15c5424e7c69bfaeec2b5381446be6.jpg) +(b), MLLM with segmentation experts +Figure 1. Comparison of current MLLMs for pixel-wise understanding with our method. (a) and (b). Current MLLMs for pixel-wise understanding feature highly complex system architectures, including an LLM, a CLIP-like vision backbone, an object token extraction model, a segmentation vision backbone, and a SAM-like decoder. (c). Our method employs only a single transformer. + +![](images/efec56ff4c133b921837f8320aa383f2ce9a78bb129bdb23c2b4f1b278cce84e.jpg) +(c), Pixel SAIL with one single transformer + +# Abstract + +Multimodal Large Language Models (MLLMs) achieve remarkable performance for fine-grained pixel-level understanding tasks. However, all the works rely heavily on extra components, such as vision encoder (CLIP), segmentation experts, leading to high system complexity and limiting model scaling. In this work, our goal is to explore a highly simplified MLLM without introducing extra components. Our work is motivated by the recent works on Single trTransformer as a unified vVision-Language Model (SAIL) design, where these works jointly learn vision tokens and text tokens in transformers. We present Pixel-SAIL, a single transformer for pixel-wise MLLM tasks. In particular, we present three technical improvements on the plain baseline. First, we design a learnable upsampling module to refine visual token features. Secondly, we propose a novel visual prompt injection strategy to enable the single transformer to understand visual prompt inputs and benefit from the early fusion of visual prompt embeddings and vision tokens. Thirdly, we introduce a vision expert distillation strategy to efficiently enhance the single transformer's fine-grained feature extraction capability. 
In addition, we have collected a comprehensive pixel understanding benchmark (PerBench), using a manual check. It includes three tasks: + +detailed object description, visual prompt-based question answering, and visual-text referring segmentation. Extensive experiments on four referring segmentation benchmarks, one visual prompt benchmark, and our PerBench show that our Pixel-SAIL achieves comparable or even better results with a much simpler pipeline. Code and model will be released at https://github.com/magicresearch/Sa2VA. + +# 1. Introduction + +Multi-modal Large Language Models (MLLMs) have garnered significant research efforts, driven by advancements of Large Language Models (LLMs) [22, 56, 65]. While most studies focus on open-ended visual question answering tasks, there is a growing interest [51, 80] in fine-grained, pixel-level understanding. This enables broader applications, such as facilitating precise region-level editing and generation and achieving precise understanding of designated mask regions. + +Recent pixel-wise MLLMs [27, 51, 54, 63, 72, 80, 81] mainly adopt visual and language fusion frameworks, following design patterns [17, 42, 68] established before the LLM era. For example, LAVIT [68] adopts encoder-fusion approach, injecting language embedding (generated by BERT [13]) into vision transformers. With the advent of + +LLMs [22, 65, 66], recent works [27, 54, 72, 80] integrate state-of-the-art segmentation models [26, 33, 53], for pixel-level understanding, by either appending them to LLM outputs or embedding LLM within segmentation pipelines. While effective, the overall architectures are complex, requiring specialized components such as vision-language fusion modules and additional decoders. Moreover, their final performance often heavily depends on either MLLMs or the segmentation models, which may lead to suboptimal results due to limitations within individual submodules. 
+ +In this work, we explore a novel, simple yet effective pixel-wise MLLM design, drawing inspiration from recent advancements in SAIL architecture, which is also called Encoder-free MLLMs. These methods drop the extra vision encoder and jointly co-train vision and language tokens on large scale datasets, with a simpler design. Moreover, they show competitive performance on image-level VQA tasks, compared with LLaVA. Motivated by this success, we extend the framework to pixel-level understanding tasks, aiming to reduce the complexity of existing approaches. To the best of our knowledge, this is the first study to explore the simplest architecture for pixel-wise MLLM tasks, including referring segmentation and visual prompt understanding. + +We first directly extend SAIL architecture by adding segmentation token and visual prompt tokens to generate segmentation masks and output region caption, following previous works [27, 51, 74]. However, this leads to inferior results on both segmentation and visual prompt understanding. Several reasons are: (1), The misalignments on high resolution features since there are no segmentation decoders since SAIL directly reshape the vision tokens into features. (2), Previous works directly adopt mask pooling on high level visual tokens where SAIL baseline only maps RGB inputs with one projection layer, where most tokens are low level features. (3), The mask quality is low since no segmentation experts are involved. + +To solve these problems, we present three simple technical improvements, which lead to our Pixel-SAIL framework. First, we design a simple learnable up-sampling module to refine the low resolution visual tokens in high resolution features. Our goal is to keep the design as simple as possible, where only one transposed 2D convolution is involved. 
Then, for visual prompt understanding, we design a novel visual prompt injection method, where we map the visual prompts into special text tokens without introducing extra visual prompt encoder in the middle stage of SAIL. Next, we propose to distill the previous segmentation experts into SAIL to improve mask quality. All the improvements are plug-in-play, and we verify the effectiveness on various SAIL architectures, including SOLO [8] and EVEv2 [16]. + +Then, to further indicate the effectiveness of our Pixel-SAIL and facilitate the development of pixel-LLM com + +munity, we further design a new challenging benchmark, PerBench. Compared with previous pixel-wise MLLM benchmarks, we have three innovative and challenging features. First, we include a detailed object caption where most existing benchmarks only contain short captions without fine-gained contents. Secondly, we re-evaluate visual-prompt understanding as multi-choice VQA tasks following MME [20] and MMBench [43] to achieve more accurate region caption evaluation. Thirdly, we introduce a task by segmenting objects jointly referenced by visual prompts and text. Our benchmark reveals the limitation of current state-of-the-art pixel-wise MLLM on fine-grained understanding and mixed referring tasks. + +Pixel-SAIL is jointly co-trained with mixed data engine on referring segmentation datasets, VQA datasets, and visual prompt datasets. Experimental results show that our method can achieve better results on five pixel-wise benchmarks. In particular, on RefCOCOg and RefCOCO+ datasets, our method with 3B size can outperform previous pixel MLLMs, including GLaMM (7B) and OMG-LLaVA (7B), by $1.5 - 3.0\%$ with a simpler pipeline. On our Per-Bench, our method achieves 24.2 METEOR, $74\%$ accuracy, 33.4 cIoU and 42.2 overall score, surpassing the SOTA MLLMs GLaMM (7B) and Sa2VA (4B) with overall scores of 26.9 and 3.2, respectively. + +# 2. Related Work + +Large Vision Language Models. 
Staring from CLIP [50] and ALIGN [24], modern vision language models have adopted contrastive learning on large-scale image-text datasets for learning vision-text aligned representations. The trained models are also proven to work well on open-vocabulary perception, such as segmentation [45, 71, 78, 79] and detection [21, 58, 61, 75]. The following works [31, 32, 64, 76] share the same network design, exploring modified loss functions and targeting data quality and filtering. Then, with the rise of large language models [5, 22, 56, 65], recent works [1, 10, 11, 40, 55, 77] mainly focus on multimodal large language models for open-ended settings, such as visual question answering or OCR benchmarks. On representative work, LLaVA [40], uses the CLIP to encode images into visual tokens and sends the visual tokens to LLMs. After that, the following works [1, 30, 41] improve designs with scaled high quality datasets, images, and videos constraining. Meanwhile, several recent works [8, 14, 16, 46] also explore the visual encoder-free designs, which jointly learn the image and text representation in a single transformer architecture. For example, SOLO [8] collects mixed language and vision datasets and trains one transformer for VQA tasks, while EVE [14] designs a CLIP supervision to enhance visual token learning. Our work follows the visual encoder-free design, and we go a step further by exploring pixel-grounded understanding tasks, including ground + +ing tasks and visual prompt understanding. To our knowledge, we are the first to apply encoder-free architecture for pixel-grounded understanding tasks. + +Referring Expression Segmentation. This task outputs specific masks driven by text description. Earlier works [19, 23, 36, 39, 67] explore various fusion architecture and modules to enhance text and vision feature alignments. 
Equipped with LLMs, several recent advanced works [27, 48, 49, 51, 63, 72, 73, 80, 82] propose more complex referring tasks, including reasoning referring or joint mask and caption generation. In particular, LISA [27] involves complex expression while GLaMM [51] annotates a new dataset and proposes region-level caption and segmentation tasks. However, all these works contain complex designs: extra vision encoders, segmentation encoders, mask decoders, and prompt encoders. Our method, Pixel-SAIL, only has one transformer to jointly learn the joint visual and language feature. With proposed data engine and improved methods, Pixel-SAIL achieves good results with much simpler architecture. + +Visual Prompt Understanding. Understanding visual prompts plays an important role when building interaction between VLMs and human. Recent works [4, 38, 47, 51, 74] build new visual prompt datasets for region caption generation and prompt-aware VQA tasks. ViP-LLaVA [4] overlays the visual prompts directly onto the image canvas and fine-tunes the LLaV on a specific visual prompt dataset, while Osprey [74] explores pixel-wise mask regions into language instructions. Our method can also be extended into visual prompt understanding with our proposed prompt token injection design. + +# 3. Method + +# 3.1. Encoder Free MLLM and Plain Baseline + +Recently, several encoder-free MLLMs [8, 15, 16, 46] achieve comparable performance with those extra vision encoders. These models jointly learn vision and text features in a single transformer, with much simpler architecture. In particular, SOLO uses a simple project layer to map the image into visual tokens and then combines language tokens as the inputs of the transformer. However, no works have explored such new architecture for fine-grained vision language tasks (region caption, referring masks). + +Plain Baseline. To fill this gap, we first construct a plain single transformer baseline, motivated by the previous ViT-based MLLMs [27, 72]. 
We start it with a pre-trained encoder-free MLLM. For segmentation tasks, we modify previous mask generation methods into the single transformer. First, we reshape the hidden states of the last transformer layer of vision tokens $\mathcal{V} \in \mathbb{R}^{N \times C}$ into image features $\mathcal{F} \in \mathbb{R}^{\frac{H}{S} \times \frac{W}{S} \times C}$ . $N$ represents the number of vision tokens, $C$ denotes the channel size, $H$ and $W$ indicate the height and width of the image, $S$ stands for the down-sampling stride. Then, the image features are cross-multiplied with the hidden states of the predicted segmentation token $\mathcal{Q} \in \mathbb{R}^{K \times C}$ to generate the segmentation masks $\mathcal{M} \in \mathbb{R}^{K \times \frac{H}{S} \times \frac{W}{S}}$ . $K$ signifies the number of predicted segmentation tokens, following previous works [27, 51]. For visual prompt understanding, we employ a pooling-based method [74] to derive object representations $\mathcal{O} \in \mathbb{R}^{M \times C}$ from image patch embeddings $\mathcal{P} \in \mathbb{R}^{\frac{H}{P} \times \frac{W}{P} \times C}$ . These object embeddings are fed into the single transformer to represent the corresponding objects. $M$ represents the number of visual prompts, and $P$ denotes the patch size. For segmentation tasks, we adopt an extra mask loss. Otherwise, we adopt the same text loss for VQA tasks and visual prompt understanding tasks.

Limitation. The plain baseline demonstrates a certain level of pixel-text alignment capability since both segmentation token and visual prompt token are jointly learned with vision and language tokens. However, the plain baseline exhibits several significant shortcomings: 1) The segmentation mask quality is poor due to the large feature down-sampling stride (16 or 32), even when using simple pixel shuffle or bilinear interpolation for up-sampling. 
2) The single transformer struggles to comprehend the referential target of object representation, as the object representation is summarized from image patch embeddings with poor semantic information. + +# 3.2. Pixel-SAIL Method + +Given the substantial shortcomings, the performance of plain baseline in fine-grained pixel understanding tasks falls significantly, compared to vision-expert competitors (Sec.4). To solve these challenges, we have implemented three key enhancements to the baseline architecture. First, we integrate a learnable up-sampling module to fully exploit the segmentation capabilities of the single transformer architecture. Second, we develop an innovative visual prompt injection mechanism that facilitates effective interpretation of visual prompt inputs. Our method enables early-stage fusion between vision tokens and visual prompt embeddings. Finally, we introduce a dense feature distillation strategy that significantly improves the model's capacity for extracting fine-grained visual features. These improvements collectively address the shortcomings of the plain baseline while maintaining its architectural simplicity. + +Learnable Up-sampling Module. Inspired by [35], we also incorporate a simple learnable up-sampling model $\mathcal{U}$ to generate the high-resolution features $F_{h} \in \mathbb{R}^{\frac{H}{4} \times \frac{W}{4} \times C}$ essential for pixel-level grounding. The up-sampling module comprises multiple up-sampling blocks, each consisting of a transposed 2D convolution followed by a depth-wise convolution. It effectively upscales the low-resolution features $F_{l} \in \mathbb{R}^{\frac{H}{S} \times \frac{W}{S} \times C}$ , derived from resized vision tokens, + +![](images/4d7c01c3de069a344975deff39094f9b32939af00d3ab7023456d2e4efc96069.jpg) +Figure 2. The architecture of our proposed plain baseline and Pixel-SAIL. Pixel-SAIL is as simple and elegant as the plain baseline but demonstrates significantly improved performance. 
The examples on the right demonstrate that Pixel-SAIL possesses the capability for general conversation and comprehensive pixel-grounded understanding. + +to one-quarter of the original resolution. + +Visual Prompt Injection. Previous works [51, 72, 74] summarize the referenced object features via pooling on vision tokens from ViT encoder. However, there are no such visual tokens for encoder-free MLLMs. Thus, the inherent semantic deficiency hinders the single transformer's ability to precisely identify referenced objects based solely on feature summaries derived from patch embeddings, where most are low-level cues, such as edges. + +To overcome this limitation, we propose an innovative visual prompt injection mechanism. Our approach integrates multiple visual prompt special tokens $\{VP_{i}|i\in [1,N]\}$ into the large language model's vocabulary. These tokens' text embeddings $\mathcal{V}\mathcal{P}^t\in \mathbb{R}^{N\times C}$ are used to fill mask-based visual prompts $\mathcal{M}^{vp}\in \mathbb{R}^{N\times \frac{H}{P}\times \frac{W}{P}}$ , thereby creating visual prompt tokens $\mathcal{V}\mathcal{P}\in \mathbb{R}^{\frac{HW}{P^2}\times C}$ . The vision tokens $\mathcal{V}\in \mathbb{R}^{\frac{HW}{P^2}\times C}$ are first added with these visual prompt tokens $\mathcal{V}\mathcal{P}$ before being processed by the single transformer. This enhancement enables the model to accurately identify referenced objects by leveraging the corresponding special tokens $\{VP_{i}|i\in [1,N]\}$ within the text instructions. + +Dense Feature Distillation. Due to the lack of large-scale, high-quality segmentation data like SA-1B [26], the method produces poor-quality masks, particularly at object boundaries. However, directly training on large-scale segmentation datasets would be costly and damage the original instruction following capabilities. 
To address both, we employ pre-trained segmentation experts to distill the single transformer, ensuring optimization of object details without hurting VQA capabilities. We perform distillation by leveraging mask features generated by Mask2Former's [12] pixel decoder on the upsampled mask features $F_{h} \in \mathbb{R}^{\frac{H}{4} \times \frac{W}{4} \times C}$ and utilizing features produced by SAM2's [53] encoder
Although captioning tasks can accurately reflect a model's visual prompt understanding ability, precise and fair evaluation is difficult. Rule-based metrics such as CIDEr [57] and METEOR [3] are affected by response length, format, and ground-truth quality, while using models as evaluators inevitably introduces model bias. Therefore, a fair and quantitative visual-prompt understanding benchmark is necessary. Inspired by MMBench [43] and MME [20], we manually annotated 500 multiple-choice questions based on detailed object captions, covering the examination of models' understanding of referenced objects' appearance, attributes, uses, and relationships with surrounding objects. MLLMs need to perceive the attributes of referenced objects accurately and have instruction-following ability to select the appropriate choice correctly. Accuracy is selected as the evaluation metric for the visual prompt-based multiple-choice questions. + +The third aspect is segmenting objects jointly referenced by visual prompts and text, abbreviated as V-T RES. It aims to test the model's ability to understand objects indicated by user-input visual prompts and segment associated objects according to text instructions. This task comprehensively assesses the MLLM's pixel-grounded understanding ability, requiring the model to possess precise visual prompt understanding capabilities, text reasoning abilities, and pixel grounding skills. We also manually annotate 500 V-T RES samples, which five expert annotators double-check. Similar with RefCOCO series datasets, we select cIoU and gIoU as the evaluation metric for V-T RES task. The overall score of PerBench is the average of the normalized scores (0-100) from the above three tasks. + +Our benchmark can be used to evaluate pixel-wise MLLMs and point out more challenging directions for detailed object understanding, joint visual prompts, and text understanding to the current community. + +Dataset Engine. 
To fully unleash the potential of the single transformer, we collect diverse pixel-grounded data, including segmentation datasets and visual-prompt understanding datasets, following previous works [16, 46]. + +For segmentation-related data, we first use Ref- + +COCO+/g [25, 70] and COCO [37] semantic segmentation data used in LISA [27], the Grandf dataset (214k samples) used in GLaMM [51], and MUSE data (246k samples) used in PixelLM [54]. We also use recent Pixel2Cap [69] data (comprising 20k images) and organized it into the referring segmentation format. Finally, we further add COCO [37] panoptic segmentation data and structured it as: "Question: Please segment the {class name} in instance mode. Answer: {class name}-1 [SEG], ..., {class name}-n [SEG]." + +For visual prompt understanding, we employ two public datasets: Osprey-724k [74] and Pixel2Cap [69]. Additionally, we reformat the COCO dataset into a question-answer structure specifically designed to query object categories. To enhance the model's capability for fine-grained object description, we prompt the InternVL2.5-78B [11] model to generate approximately 300k detailed object captions derived from 10k SA-1B [26] images. Lastly, to maintain the instruction following ability, we also integrate the LLaVA1.5 [40] 665k dataset into our training data. + +Training. We combine all the aforementioned data for cotraining. The loss function consists of the next token prediction loss $\mathcal{L}_{ntp}$ , the segmentation loss $\mathcal{L}_{seg}$ , and the distillation loss $\mathcal{L}_{distill}$ : + +$$ +\mathcal {L} = \mathcal {L} _ {n t p} + \mathcal {L} _ {s e g} + \alpha \mathcal {L} _ {\text {d i s t i l l}}, \quad \mathcal {L} _ {s e g} = \lambda \mathcal {L} _ {c e} + \beta \mathcal {L} _ {\text {s e g}}, \tag {1} +$$ + +where $\alpha$ is set to 0.5, $\lambda$ to 2.0 and $\beta$ to 0.5. + +# 4. Experiment + +Implementation Details. 
We extensively evaluate our meta-architecture using two open-source encoder-free multimodal large language models: SOLO [8] and EVEv2 [16]. For SOLO, following [28], we modify the attention mechanism between vision tokens from causal attention to full attention, conduct supervised fine-tuning on the LLaVA1.5 665k dataset, and replace the LLM with Qwen2.5 [66] 0.5B and 3B, respectively. For EVEv2, we retain its original architecture and weights without any modifications. We build Pixel-SAIL 0.5B and 3B based on our modified SOLO baseline, and 7B on EVEv2. When training Pixel-SAIL based on SOLO, we maintain the original resolution of input images. For images with a long side exceeding 1024, we preserve the aspect ratio and resize the long side to 1024. When training Pixel-SAIL based on EVEv2, we resize the images to approximately $800^2$ pixels to reduce training costs, which differs from the original setting of $1600^2$ . The training process is conducted on 32 A100 (80GB) GPUs using the AdamW [44] optimizer with a cosine decay learning rate scheduler. We set the initial learning rate to 4e-5, the warm-up ratio to 0.03, and the batch size to 256. The training duration for the 0.5B and 3B models is 12 hours and 24 hours, respectively.
MethodLLM SizeRefCOCO+RefCOCOgRefCOCOgRefCOCO
valtestAtestBval(U)test(U)valtestAtestBvaltestAtestB
Referring Segmentation Specialist Without MLLM
VLT [17]-56.361.050.155.057.767.570.565.252.562.250.5
CRIS [59]-62.368.153.759.960.470.573.266.155.363.851.0
LAVT [68]-62.168.455.161.262.172.775.868.857.665.355.0
PolyFormer-L [42]-69.374.661.969.270.276.078.373.3---
ReLA [39]-66.071.057.765.066.073.876.570.256.459.058.4
MLLMs With Vision Expert
LISA (ft) [27]7B65.170.858.167.970.674.979.172.3---
PixelLM [54]7B66.371.758.369.370.573.076.568.2---
GSVA (ft) [63]7B64.567.758.671.172.076.477.472.861.769.260.3
GroundHog [81]7B70.575.064.974.174.678.579.975.766.7--
GlaMM (ft) [51]7B72.678.764.674.274.979.583.276.9---
SAM4MLLM [9]7B73.577.865.874.575.679.682.876.166.370.163.2
LaSagnA [60]7B66.470.660.170.671.976.878.773.838.150.442.1
OMG-LLaVA (ft) [80]7B69.173.163.072.972.978.080.374.1---
F-LLM [62]7B65.875.258.570.171.775.879.572.4---
Sa2VA [72]4B74.3--76.7-80.4-----
MLLMs Without Vision Expert
Pixel-SAIL0.5B70.875.865.475.476.777.980.575.963.971.563.6
Pixel-SAIL (ft)0.5B73.077.068.075.676.179.181.777.068.074.066.8
Pixel-SAIL3B75.779.772.078.780.480.882.679.067.774.667.1
Pixel-SAIL (ft)3B76.279.771.278.579.481.883.478.872.177.170.4
+ +Table 2. Region caption performance on RefCOCOg dataset. + +
Method SizePixel-SAIL 0.5BPixel-SAIL 3BSa2VA 4BOMG-LLaVA 7BOsprey 7BGLaMM 7B
METEOR16.017.617.315.316.616.2
+ +Table 3. The performance on our PerBench. Due to the lack of visual prompt understanding capability, LISA scores 0 on all tasks. + +
ModelSizeDetailed Caption METEORMCQ AccV-T RESOverall Score
cIoUgIoU
LISA [27]7B00000
Osprey [74]7B13.40.12008.5
GLaMM [51]7B12.60.1424.314.615.3
Sa2VA [72]4B19.20.7131.921.939.0
Pixel-SAIL0.5B21.40.6929.719.838.4
Pixel-SAIL3B24.20.7433.423.542.2
+ +Table 4. Performance on the VQA benchmarks. $\star$ refers to the use of an $800^{2}$ resolution, which differs from the $1600^{2}$ resolution in the pre-trained model. + +
ModelLLM SizeMMEMMBenchSEEDMMStar
SOLO0.5B523.2/222.513.845.526.2
SOLO3B1155.7/257.553.465.440.3
EVEv2*7B1128.0/240.760.354.244.9
Pixel-SAIL0.5B564.1/150.731.852.226.3
Pixel-SAIL3B1187.3/242.956.366.140.1
Pixel-SAIL*7B1081.0/260.458.964.744.3
+ +Evaluation Setup. For visual prompt understanding and general image QA tasks, we adhere to the same setting as the base MLLM. In the case of segmentation-related tasks, if the model fails to predict a [SEG] token, we compel it + +to produce a [SEG] token to ensure the generation of the segmentation result. + +# 4.1. Main Results + +Results on Referring Segmentation Benchmarks. We compare Pixel-SAIL with other pixel-grounded MLLMs and segmentation specialists on the RefCOCO+ [70], RefCOCOg [70], RefCOCO [25], and gRefCOCO [39] datasets. The comparison results are shown in Tab. 1. Pixel-SAIL 0.5B achieved 70.8, 75.4, and 77.9 cIoU on the validation splits of RefCOCO+, RefCOCOg, and RefCOCO, outperforming all segmentation specialists with comparable model sizes while also maintaining image conversation capabilities. Compared to the classical SAM-based MLLM competitor LISA-7B [27], Pixel-SAIL 0.5B surpassed it by 4.2, 7.9, and 7.8 cIoU on RefCOCO, RefCOCO+, and RefCOCOg respectively, despite having a much smaller model size (0.5B vs. 7B). On the more complex gRefCOCO dataset that includes multi-object segmentation, Pixel-SAIL 0.5B outperformed the carefully designed GSVA-7B [63] by 6.3, 4.8, and 6.5 cIoU on validation, testA, and testB splits respectively. + +When scaling the model to 3B, Pixel-SAIL achieved 75.7, 78.7, 80.8, and 67.7 cIoU on RefCOCO+, RefCOCOg, RefCOCO, and gRefCOCO datasets respectively, surpassing all larger-sized (7B) MLLMs assisted with vision experts. Pixel-SAIL-3B even outperformed the SOTA Sa2VA-4B [72] (which uses the powerful InternVL2- + +Table 5. Ablation study on the components of Pixel-SAIL. "RC" denotes region caption on RefCOCOg dataset. + +
ModelRefCOCO+/gRC
Plain Baseline64.5/57.3/60.11.0
+ Upsampling69.7/62.5/65.30.9
+ Training Data76.2/69.6/73.81.4
+ VP Injection77.4/70.4/75.216.1
+ Distillation77.9/70.8/75.416.0
+ +Table 6. Ablation study on Base MLLM. The training data only includes LLaVA-665k and Ref-COCO+/g. + +
MLLMSizeRefCOCO/+/g
SOLO0.5B69.7/62.5/65.3
SOLO3B73.2/66.4/69.1
EVEv27B74.9/68.7/71.3
+ +caption on RefCOCOg dataset. + +
DataRefCOCO+/gRC
Basic Data69.7/62.5/65.3-
+ Seg Data76.2/69.6/73.8-
+ VP Data77.4/70.4/75.216.1
Table 7. Ablation on the training data. "RC" denotes region caption on RefCOCOg dataset. Table 8. Ablation study on the distillation strategy.
DataRefCOCO+/g
w/o Distill77.5/70.5/75.5
M2F77.7/71.0/75.8
SAM277.8/70.9/75.9
Both78.1/70.8/76.1
+ +4B [10] and SAM2-L [53]), achieving performance advantages of 1.4 and 2.0 cIoU on the more challenging RefCOCO+ and RefCOCOg datasets respectively. + +Results on Visual Prompt Understanding Benchmarks. We evaluate the region caption performance on the RefCOCOg dataset, with results shown in Tab. 2. The training dataset of Pixel-SAIL does not include the RefCOCOg region caption dataset, so we directly evaluate its zero-shot performance. Pixel-SAIL-0.5B achieves a METEOR score of 16.0, surpassing OMG-LLaVA 7B by 0.7 points. When scaling the model to 3B, Pixel-SAIL achieves a METEOR score of 17.6, outperforming carefully designed larger models such as Osprey 7B and GLaMM 7B by 1.0 and 1.4 points respectively. + +Results on PerBench. We have benchmarked several popular pixel-grounded MLLMs on our proposed PerBench, with results shown in Tab. 3. LISA [27] scores 0 points across all tasks due to its inability to understand visual prompt inputs. Osprey [74] demonstrates strong object caption capabilities; however, it achieved only 13.4 METEOR in detailed caption tasks and $12.0\%$ accuracy in MCQ tasks due to limitations from short object caption lengths in its training data and impaired instruction-following ability. GLaMM [51] and Sa2VA [72] both exhibit comprehensive prompt understanding and segmentation capabilities, though GLaMM's weaker instruction-following ability resulted in only $14.0\%$ accuracy in MCQ tasks. PixelSAIL-0.5B achieves an overall score of 38.4, comparable to Sa2VA-4B despite Pixel-SAIL having a more powerful base MLLM and segmentation expert. Notably, Pixel-SAIL-3B achieves an overall score of 42.2, outperforming Sa2VA-4B across all three tasks. + +Results on VQA Benchmarks. We compare the visual question answering performance of Pixel-SAIL with the corresponding base MLLMs on the MME [20], MM-Bench [43], SEED [29], and MMStar [7] benchmarks, and the results are presented in Tab. 4. 
When the model size is 0.5B, Pixel-SAIL demonstrates performance improvements over the base MLLM across all four benchmarks, particularly on MMBench, where the score increased from 13.8 to 31.8. However, when the model size is 3B and 7B, Pixel-SAIL's performance is on par with that of the base MLLMs, which may be constrained by the current quantity (less than 2M) and quality of visual prompts and segmentation data. + +![](images/5e29fcd4523ca0d6da3e40f45f1513abc6492464fdbacd397a078097c046b16e.jpg) + +![](images/bf21200e51b1a51097e0d3cc0511c985b0448f7206b378f04cc6aff1759b35af.jpg) + +![](images/b1cbbc683726f798824e46d98003ddc8d8e08482742e724218e33775a000ce62.jpg) + +![](images/ec4c320c8bb602c970cf6d5f5b650ee874030055777473a9e2cfe4a35408d8f6.jpg) + +![](images/8514ef98ba120a98f43055533243a21f0110a475a9d3057fb2ce9a607d8d7ff4.jpg) + +![](images/4d78f012581df715eb27cab4d9168bf99ced4bfddc463564b571508b9fb66030.jpg) + +![](images/9b7217328723a584f890fb9982f3e03057486311027c5ff17f4e48f7003ad32e.jpg) + +![](images/aa0956d34ee08556b170855ddc7b77d845501068b701513f9dd4cd5aa1f0b6cb.jpg) + +![](images/a312e96e807c99c7975509f3cc30a8650cb5647a303a336f39244b88fb17a222.jpg) + +![](images/555602258ffdfb0cda0578a5201c1603ada5da1407bdc869aa0c325320e39050.jpg) + +![](images/fb4f0cc86bc8fa166e3d2d93309b4d2f49a2774966f93cbc596b8fc90285bfc2.jpg) + +![](images/f42004728503f31551f10e09916551225730c7ae2579b188ed67c1f60bb2c337.jpg) +Figure 4. Visualization results of Pixel-SAIL on diversity tasks. Best view it in color and zoom in. From top to bottom are visual prompt-based object caption, single/multi-object referring segmentation, vision-text referring segmentation, image caption and QA, and visual-prompt based conversation. Visual prompts in the form of points and boxes are converted into mask prompts using SAM [26]. For more visualization results and comparisons with other MLLMs, please refer to the appendix. 
+ +![](images/1748944d684d603bdbb18ef6a58cc5f55b83c868283d1ba3cb803734338aa333.jpg) + +![](images/8c0aa32b9248a9481769b0b1ab82aaee0bd5d496ea09b8a99583223e7dc02225.jpg) + +![](images/8b9e4c68f6ea43f611dad57e5712dd70f8478ec89248fbfbeb3f59bd966df9c9.jpg) + +![](images/5a3cb155b0ff4d71d1da04b93abb3ec77b64982962df8f4621fb90b63ba49b49.jpg) + +![](images/c9e0341d3155c0159d45850ef3883c1f855c59deeb7dcf71f646e5a702932d8a.jpg) + +![](images/82720d858e972b556426d0d7a59e765573479959ded0e27c4c7c0c3bcaa4ae1e.jpg) + +![](images/576a8003516e688ddfec0f59b4c386ac5993c297f63a6a14bea084ff9ff5f7df.jpg) + +![](images/cec28f82f78a1ff66461359b6cf56928057d0bb8f10e5c03c2af24f3188d7bfc.jpg) + +![](images/ddd809653d8358f3fa9d942f2ed362b8eb1d49d2406959486c808b219fa5bd1b.jpg) +Figure 5. Image feature visualization results. From left to right are the image feature of the base MLLM, the image feature of Pixel-SAIL, and the mask feature of Pixel-SAIL. + +![](images/956d0f979008f483a5008e483c6d996e4b1db14407264f0f0477c843be8c3d9a.jpg) + +![](images/80c579cddfe2ed559771ad16801a79afb29c49427d171ce49295ed1addfbe13c.jpg) + +![](images/51a610628940f7450f2cce02aab7c97bd296dcf6d63c3709ae587ebcc9940c11.jpg) + +# 4.2. Ablation Studies + +Effectiveness of Each Component. We conduct comprehensive ablation studies on the proposed components, with results presented in Tab. 5. Our plain baseline, trained with LLaVA-665k and RefCOCO+/g data, achieves only 64.5, 57.3, and 60.1 cIoU on the RefCOCO, RefCOCO+, and RefCOCOg datasets, respectively. Moreover, this baseline completely fails on the visual prompt understanding task, attaining merely 1.0 METEOR on the region caption task. Upon incorporating the learnable upsampling mod + +ule, segmentation quality improves dramatically, with the model reaching 76.2, 69.6, and $73.8\mathrm{cIoU}$ on RefCOCO, RefCOCO+, and RefCOCOg. 
However, the model still cannot effectively interpret user-input visual prompts due to insufficient semantic information in the object representation. When we scale up the training data by introducing substantial amounts of segmentation data and visual-prompt understanding data, the model's segmentation capabilities are further enhanced. Despite scaling the training data, the model continues to struggle with visual prompt inputs because of the limited semantic information in the object representation. After implementing our proposed visual prompt injection mechanism, the model demonstrates significant improvements in visual prompt understanding, achieving 16.1 METEOR on the region caption task. Interestingly, we observe that enhanced visual prompt understanding capabilities positively influence referring segmentation performance. Finally, incorporating the distillation strategy further refines the model's detailed segmentation quality. + +Ablation on Various MLLMs. To demonstrate the effectiveness of Pixel-SAIL, we validate across different architectures and sizes, with results shown in Tab. 6. To reduce training costs, we use only LLaVA-665k and RefCOCO $+ / \mathrm{g}$ data for training and evaluate on the referring segmentation task. When using our modified 0.5B SOLO as the base MLLM, Pixel-SAIL achieves cIoU scores of 69.7, 62.5, and 65.3 on RefCOCO $+ / \mathrm{g}$ . When scaling the model size to 3B, Pixel-SAIL's performance improves by 3.5, 3.9, and 3.8 cIoU on RefCOCO $+ / \mathrm{g}$ . When using EVEv2-7B as the base MLLM, despite the attention between vision tokens changing from full attention to causal attention and the architecture transitioning to an MOE architecture, Pixel-SAIL achieves cIoU scores of 77.4, 70.4, and 75.2 on RefCOCO $+ / \mathrm{g}$ , demonstrating that performance consistently increases with model scaling. + +Ablation on Data Scaling. Data plays a crucial role in the performance of Pixel-SAIL. As shown in Tab. 
7, we conduct comprehensive ablation studies on the training data to evaluate its impact. When trained solely with basic data (including LLaVA-665k and RefCOCO+/g datasets), Pixel-SAIL achieves 69.7, 62.5, and 65.3 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively. Upon scaling the segmentation-related data, Pixel-SAIL demonstrates significant performance improvements of 6.5, 7.1, and 8.5 cIoU on these datasets. Furthermore, incorporating visual prompt data for mixed training not only enhances the model's visual prompt understanding capabilities but also yields additional performance gains of 1.2, 0.8, and 1.4 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively. + +Ablation on Distillation Strategy. Distillation is a highly effective method for infusing knowledge into Pixel-SAIL. We conduct ablation studies on the distillation strategy, and + +the results are presented in Tab. 8. We use the average cIoU across all splits as the evaluation metric. When only Mask2Former [12] is employed to distill high-resolution mask features, Pixel-SAIL achieves performance gains of 0.2, 0.5, and 0.3 on RefCOCO $+ / \mathrm{g}$ . When SAM2 [53] is used to distill low-resolution image features, Pixel-SAIL obtains performance improvements of 0.3, 0.4, and 0.4 on RefCOCO $+ / \mathrm{g}$ . When both teacher models are utilized collaboratively, performance gains of 0.6, 0.3, and 0.5 are achieved. Additionally, the extra computational cost introduced by the distillation strategy is minimal, increasing the training time by only about $5\%$ for Pixel-SAIL-0.5B. + +# 4.3. Visualization Analysis + +Visual Comparison. In Fig. 4, we showcase Pixel-SAIL's visualization results on diverse tasks. Pixel-SAIL flexibly interprets both visual prompts and text instruction inputs, responding with text and segmentation masks. + +Visual Affinity Map Analysis. We use PCA dimensionality reduction algorithm to visualize vision features, with results shown in Fig. 5. 
Our Pixel-SAIL's image features (3rd column) are denser and more diverse compared to the base MLLM's image features (2nd column). Pixel-SAIL's mask features, after the upsampling module, are denser and have better segmentation edges. Interestingly, Pixel-SAIL's image features (more focused on understanding, combining factors such as categories, colors, positions, etc.) exhibit different characteristics from mask features (more focused on perception, categories, and instances). As seen in the second row's third and fourth columns, the cars on the left and right have relatively distant feature representations in the image features, while they are very close in the mask features.

# 5. Conclusion

We explore the simplest architecture for pixel-grounded understanding tasks. In particular, we present Pixel-SAIL, which extends the current SAIL-like MLLM for fine-grained understanding with three technical improvements (learnable upsampling module, new visual prompt encoding, and segmentor feature distillation). For the first time, our work proves that even without extra visual experts (visual encoder, segmentation models), one single transformer can still achieve stronger performance on four public referring segmentation benchmarks. We further introduce a more challenging benchmark, PerBench, to promote the development of the pixel-MLLM community.

Limitation and Future Work. Our work provides the simplest solution for pixel-grounded tasks. However, one limitation is that we only adopt 1.7M data for co-training. We will further explore Pixel-SAIL on more data (for example, billion-level masks along with visual prompts [26]) for co-training.

# References

[1] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966, 2023. 
+[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. +[3] Satanjeev Banerjee and Alon Lavie. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. ACL, 2005. +[4] Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, and Yong Jae Lee. Making large multimodal models understand arbitrary visual prompts. In CVPR, 2024. +[5] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internl m2 technical report. arXiv preprint arXiv:2403.17297, 2024. +[6] Lin Chen, Jisong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. arXiv preprint arXiv:2311.12793, 2023. +[7] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. +[8] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. TMLR, 2024. +[9] Yi-Chia Chen, Wei-Hua Li, Cheng Sun, Yu-Chiang Frank Wang, and Chu-Song Chen. Sam4mllm: Enhance multimodal large language model for referring expression segmentation. ECCV, 2024. +[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. +[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. 
In CVPR, 2024. +[12] Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In CVPR, 2022. +[13] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019. +[14] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free + +vision-language models. arXiv preprint arXiv:2406.11832, 2024. +[15] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. NeurIPS, 2025. +[16] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Eve2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025. +[17] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021. +[18] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In ACMMM, 2024. +[19] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. In CVPR, 2021. +[20] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, Yunsheng Wu, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. +[21] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022. 
+[22] Louis Martin Hugo Touvron, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, and et al. Llama 2: Open foundation and fine-tuned chat models. arXiv:2307.09288, 2023. +[23] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In ECCV, 2020. +[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. +[25] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. +[26] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. ICCV, 2023. +[27] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In CVPR, 2024. +[28] Weixian Lei, Jiacong Wang, Haochen Wang, Xiangtai Li, Jun Hao Liew, Jiashi Feng, and Zilong Huang. The scalability of simplicity: Empirical analysis of vision-language learning with a single transformer. arXiv, 2025. +[29] Bohao Li, Yuying Ge, Yixiao Ge, Guangzhi Wang, Rui Wang, Ruimao Zhang, and Ying Shan. Seed-bench: Benchmarking multimodal large language models. In CVPR, 2024. + +[30] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. +[31] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, 2022. 
+[32] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023. +[33] Xiangtai Li, Haobo Yuan, Wei Li, Henghui Ding, Size Wu, Wenwei Zhang, Yining Li, Kai Chen, and Chen Change Loy. Omg-seg: Is one model good enough for all segmentation? In CVPR, 2024. +[34] Xiaotong Li, Fan Zhang, Haiwen Diao, Yueze Wang, Xinlong Wang, and Ling-Yu Duan. Densefusion-1m: Merging vision experts for comprehensive multimodal perception. arXiv preprint arXiv:2407.08303, 2024. +[35] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. ECCV, 2022. +[36] Chen Liang, Wenguan Wang, Tianfei Zhou, Jiaxu Miao, Yawei Luo, and Yi Yang. Local-global context aware transformer for language-guided video segmentation. arXiv preprint arXiv:2203.09773, 2022. +[37] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. +[38] Weifeng Lin, Xinyu Wei, Ruichuan An, Peng Gao, Bocheng Zou, Yulin Luo, Siyuan Huang, Shanghang Zhang, and Hongsheng Li. Draw-and-understand: Leveraging visual prompts to enable mllms to comprehend what you want. arXiv preprint arXiv:2403.20271, 2024. +[39] Chang Liu, Henghui Ding, and Xudong Jiang. GRES: Generalized referring expression segmentation. In CVPR, 2023. +[40] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023. +[41] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, 2024. +[42] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. *Polyformer: Referring image segmentation as sequential polygon generation*. *CVPR*, 2023. 
+[43] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In ECCV, 2024. +[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint, 2017. +[45] Timo Lüddecke and Alexander Ecker. Image segmentation using text and image prompts. In CVPR, 2022. +[46] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the + +boundaries of monolithic multimodal large language models with endogenous visual pre-training. CVPR, 2025. +[47] Chuofan Ma, Yi Jiang, Jiannan Wu, Zehuan Yuan, and Xiaojuan Qi. Groma: Localized visual tokenization for grounding multimodal large language models. In ECCV, 2024. +[48] Shehan Munasinghe, Hanan Gani, Wenqi Zhu, Jiale Cao, Eric Xing, Fahad Shahbaz Khan, and Salman Khan. Videoglamm: A large multimodal model for pixel-level visual grounding in videos. arXiv preprint arXiv:2411.04923, 2024. +[49] Lu Qi, Yi-Wen Chen, Lehan Yang, Tiancheng Shen, Xiangtai Li, Weidong Guo, Yu Xu, and Ming-Hsuan Yang. Generalizable entity grounding via assistance of large language model. arXiv preprint arXiv:2402.02555, 2024. +[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. +[51] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M. Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S. Khan. Glamm: Pixel grounding large multimodal model. In CVPR, 2024. +[52] Jeff Rasley, Samyam Rajbhandari, Olatunj Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In SIGKDD, 2020. 
+[53] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. +[54] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In CVPR, 2024. +[55] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024. +[56] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and efficient foundation language models. arXiv:2302.13971, 2023. +[57] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. CVPR, 2015. +[58] Jiaqi Wang, Pan Zhang, Tao Chu, Yuhang Cao, Yujie Zhou, Tong Wu, Bin Wang, Conghui He, and Dahua Lin. V3det: Vast vocabulary visual detection dataset. In ICCV, 2023. +[59] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022. + +[60] Cong Wei, Haoxian Tan, Yujie Zhong, Yujiu Yang, and Lin Ma. LaSagnA: Language-based segmentation assistant for complex queries. arXiv preprint arXiv:2404.08506, 2024. +[61] Jianzong Wu, Xiangtai Li, Shilin Xu, Haobo Yuan, Henghui Ding, Yibo Yang, Xia Li, Jiangning Zhang, Yunhai Tong, Xudong Jiang, Bernard Ghanem, and Dacheng Tao. Towards open vocabulary learning: A survey. arXiv pre-print, 2023. +[62] Size Wu, Sheng Jin, Wenwei Zhang, Lumin Xu, Wentao Liu, Wei Li, and Chen Change Loy. 
F-lmm: Grounding frozen large multimodal models. CVPR, 2025. +[63] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In CVPR, 2024. +[64] Hu Xu, Saining Xie, Xiaoqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. arXiv preprint arXiv:2309.16671, 2023. +[65] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. +[66] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. +[67] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022. +[68] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022. +[69] Zuyao You, Junke Wang, Lingyu Kong, Bo He, and Zuxuan Wu. Pix2cap-coco: Advancing visual comprehension via pixel-level captioning. arXiv preprint arXiv:2501.13893, 2025. 
+[70] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In ECCV, 2016. +[71] Haobo Yuan, Xiangtai Li, Chong Zhou, Yining Li, Kai Chen, and Chen Change Loy. Open-vocabulary sam: Segment and recognize twenty-thousand classes interactively. arXiv preprint, 2024. +[72] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and + +Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025. +[73] Haobo Yuan, Tao Zhang, Xiangtai Li, Lu Qi, Zilong Huang, Shilin Xu, Jiashi Feng, and Ming-Hsuan Yang. 4th pvuw mevis 3rd place report: Sa2va. arXiv preprint arXiv:2504.00476, 2025. +[74] Yuqian Yuan, Wentong Li, Jian Liu, Dongqi Tang, Xinjie Luo, Chi Qin, Lei Zhang, and Jianke Zhu. Osprey: Pixel understanding with visual instruction tuning. In CVPR, 2024. +[75] Alireza Zareian, Kevin Dela Rosa, Derek Hao Hu, and Shih-Fu Chang. Open-vocabulary object detection using captions. In CVPR, 2021. +[76] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. +[77] Pan Zhang, Xiaoyi Dong, Bin Wang, Yuhang Cao, Chao Xu, Linke Ouyang, Zhiyuan Zhao, Shuangrui Ding, Songyang Zhang, Haodong Duan, Wenwei Zhang, Hang Yan, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer: A vision-language large model for advanced text-image comprehension and composition. arXiv preprint arXiv:2309.15112, 2023. +[78] Tao Zhang, Xingye Tian, Yu Wu, Shunping Ji, Xuebo Wang, Yuan Zhang, and Pengfei Wan. DVIS: Decoupled video instance segmentation framework. In ICCV, 2023. +[79] Tao Zhang, Xingye Tian, Yikang Zhou, Shunping Ji, Xuebo Wang, Xin Tao, Yuan Zhang, Pengfei Wan, Zhongyuan Wang, and Yu Wu. 
Dvis++: Improved decoupled framework for universal video segmentation. arXiv preprint arXiv:2312.13305, 2023. +[80] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Change Loy Chen, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. In NeurIPS, 2024. +[81] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In CVPR, 2024. +[82] Yikang Zhou, Tao Zhang, Shilin Xu, Shihao Chen, Qianyu Zhou, Yunhai Tong, Shunping Ji, Jiangning Zhang, Xiangtai Li, and Lu Qi. Are they the same? exploring visual correspondence shortcomings of multimodal llms. arXiv preprint arXiv:2501.04670, 2025. + +# Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding + +Supplementary Material + +We first present more details on training and testing of our Pixel-SAIL in Sec. 6. Then, we present the detailed benchmark building process, in Sec. 7 and more challenging examples in PerBench in Sec. 8. Next, we present more comparison with current state-of-the-art pixel-grounded MLLMs, in Sec. 9. + +# 6. More Detailed Training and Testing + +Training. We will present more details about the training, including dataset sampling specifications and distillation methodology. For the RefCOCO series [25, 70] datasets, we randomly sample 5 referring expressions per image and organize them into a multi-round dialogue format as a single training data point, processing all images for four epochs. For COCO [37] data, we sample 5 categories per image and randomly select either instance mode or semantic mode to structure the responses. In instance mode, objects are arranged by their center points from left to right. We process the COCO dataset for one epoch. 
For Pixel2Cap [69], our generated detailed object caption data, and Osprey [74] object description data, we randomly sample 1-5 visual prompts per image and randomly incorporate questions about non-existent visual prompts, with responses indicating that these visual prompts do not exist. These object caption datasets are processed for five epochs. For other segmentation-related or visual prompt-related data, we conduct one epoch. For LLaVA-665k, we randomly sample at a 1:1 ratio alongside other data for joint training to ensure that the base MLLM's instruction-following capability remains intact. + +When the length of input tokens (including the length of vision tokens) exceeds 8192, we truncate the excess portion. For the 0.5B model, we use DeepSpeed Zero-1 [52] for training, and for the 3B and 7B models, we use DeepSpeed Zero-2 [52] for training. + +We distill the mask features generated by the Mask2Former [12] pixel decoder and the lowest resolution features generated by the SAM2 [53] image encoder onto the upsampled mask features from Pixel-SAIL and the image features directly reshaped from vision tokens, respectively. We use bilinear interpolation to align spatial dimensions and implement a learnable linear layer to align the channel size. The distillation process employs MSE loss with a weight of 0.5. + +Testing. We have elaborated on the testing details of Pixel-SAIL on pixel-grounded benchmarks in the main text. For general image question answering benchmarks, we follow the prompt settings of the base MLLMs and use + +VLMEvalKit [18] for evaluation, without using additional LLM assistance to identify answers. + +# 7. More Detailed Process on Benchmarking Building + +The construction of PerBench combines an automated model-generated pipeline with manual screening, correction, and annotation. The process is divided into three stages. + +The first stage involves annotating detailed object captions. 
We crop objects and draw visual prompts on the original images to prompt InternVL-2.5 78B [10] and Qwen-VL 2.5 72B [2] to generate detailed captions for the objects. These captions are then cross-validated using Qwen2.5 72B [66]. If all captions are consistent, they are integrated using an LLM; otherwise, the data are discarded. After the model automatically generates the detailed object captions, we manually select and correct 500 of them to form the final 500 detailed object caption data points in the benchmark. + +The second stage focuses on annotating visual-prompt question-answering data in an MCQ (Multiple Choice Question) format. In this phase, we manually generate a multiple-choice question for each object caption obtained from the first stage. After completing the annotations, two quality control specialists perform cross-verification to identify and rectify any potential errors. + +The final stage contains the annotation of visual-text referring segmentation data. At this stage, we manually select and annotate object segmentation masks, referring visual prompts, and text from SAM images. During the annotation process, we consider various factors such as positional relationships, event relationships, appearance, size, and more, including cases with both single and multiple visual prompts. Once the annotation is complete, two individuals review it, and correct the errors. + +# 8. More Challenging Cases in PerBench + +We present more detailed object caption samples from our PerBench in Fig. 6. The objects are derived from diverse senses and categories, encompassing humans, man-made objects, and natural landscapes. The object captions include basic categories, attributes, purposes, and relationships with surrounding objects. This high-quality benchmark will effectively advance the development of the visual prompt understanding community. + +More referring segmentation samples are illustrated in Fig. 7. 
Our manually annotated samples cover a variety of scenes, such as indoor and outdoor settings, and include + +![](images/54f00460d44c59b795de71e7634a25e6d01f0a7ce2b703a11af4760339be0174.jpg) + +The object is a person standing behind a wooden podium covered with a blue cloth, addressing an audience outdoors. The podium has an emblem, and the person is dressed in a dark blue jacket with a logo, a scarf, and a white shirt. The audience includes people seated on a stool and a folding chair, with trees, parked cars, and a building with large windows visible in the background. + +![](images/26dbd4c246279b4fc314092ff22dad26f9b64219d0101f942ada95ce6e793161.jpg) + +The object is a bronze statue of a young boy with exaggerated features. The statue is dressed in a formal outfit including a suit jacket, vest, and shorts, and it wears flip-flops. The boy holds a small object resembling a ball in his right hand. The statue's whimsical and playful appearance, characterized by exaggerated proportions and a sense of movement, is set on a rock-like structure in an urban street scene with a pedestrian crossing visible in the background. + +The object is a Smart Fortwo, a compact city car known for its small size and maneuverability. This car features predominantly white with green accents, including green wheels and trim. Designed for urban environments with limited parking space, it is parked on a sidewalk next to a charging station, indicating its electric vehicle status. The charging cable is connected, suggesting it is currently being charged. The surrounding area includes a paved road with multiple lanes and a grassy area separated by a curb. + +![](images/2186d7b541c3ac8b0b1e6f888aabb7d0139e70b68d4899e20fc2e63d36c16d9c.jpg) + +The object is a person dressed in black and blue attire, standing on skis in a snowy outdoor setting, preparing for or participating in a skiing event. 
This person is wearing a black beanie, a black puffy jacket, black gloves, black pants, and a blue bib with the number "1" in red. They also have ski poles and ski boots. In the background, there are other people skiing or standing around, along with parked cars and trees, indicating a recreational or competitive skiing area. + +![](images/52758840947ce05f90ecc252c8b5c5818b4964a9f94be12806665b38888ffaa2.jpg) + +![](images/3a6e35850e62808d80931bfa589b802dcb84f7e252ccaf4415e54f89948ea1b6.jpg) + +The object is a stack of white bangles adorned with small, colorful stones. These bangles, part of traditional jewelry, are worn on the arm of a person dressed in vibrant traditional attire, including a yellow scarf with colorful patterns and a floral-patterned garment, complementing the overall colorful and cultural appearance. + +![](images/7300f72a106255187cf4c86e7b5a5dc72806c38f843ff35d4057d0d97f0a0ca6.jpg) + +The object is a waterfall cascading down a steep, rocky cliff. The waterfall serves as the central focus, with water flowing smoothly from top to bottom. The area around the waterfall features rugged, uneven rock surfaces and patches of green vegetation. The cliff is part of a larger mountainous or hilly terrain, with dense foliage at the top. The waterfall's flow and movement create a striking contrast against the dark, textured rocks, highlighting the natural beauty of the scene. + +![](images/25c68c43191bca0b4140046d5bba17dabfcc96d22a4b3288b17449104cd462b6.jpg) +Figure 6. More visualization examples of detailed object captions from our PerBench. + +The object is a black shoulder bag with a flap closure. It is made of a synthetic or leather-like material, has a smooth texture and sheen, and is carried over the shoulder of a person walking in a grassy area near a wooden fence and a small white stool, dressed in a black top and pink shorts. 
+ +![](images/9440ef894fa079e5258e6dfd1b6a1c3ddfd1826198e0db08f174e3db7c0242e4.jpg) + +The object is a navigational buoy used in maritime environments. It is a cylindrical structure with a pointed top, primarily white in color, featuring a pink top and a distinctive band. Mounted on a black base, it is situated in a body of water such as a harbor or marina, surrounded by other boats and buoys. Its function is to guide vessels through waterways, aiding navigation with its unique color pattern. This buoy is part of a larger maritime setting, often near populated areas or popular boating destinations. + +objects of multiple granularities. The referring text encompasses positional relationships, event relationships, and more. This new task is more challenging than current pure text referring segmentation tasks. + +Q: Please segment which object $\langle \text{vp\_0} \rangle$ is using. A: [SEG] Q: Please segment the $\langle \text{vp\_0} \rangle$ that is sitting. A: [SEG] + +![](images/637e2c6c160d921c8e8eb7a912d02ef64fb5d5f496a09547497acdb12cb96449.jpg) + +![](images/b3cba3595b4d3ca756e3999aab583c70696f7b48d9f22af4691894b857494ece.jpg) + +Q: Please segment where will arrive. A: [SEG] + +Q: Please segment the object that blocks the sunlight for $<\mathrm{vp\_0}$ . A: [SEG] + +![](images/c68060ea861a6cbea6396bbf326c799ca8df216d62e6c88c6bcb08edd708f8c3.jpg) + +![](images/a18d58ffa3b9e5997496a5e5cb52fc23eae362b5fdf646c42c3a31b431a2d3c0.jpg) + +Q: Please segment the letter closest to . +A: [SEG] + +Q: Please segment the object that is enjoying. +A: [SEG] + +![](images/21d42f32578b84b87baec4d382d3e198b1d4bb07238b436c05ce0aa66b7c57da.jpg) + +![](images/29ce58b437a5916dea640e00806154a31abae29e90df52edf2d67809c927bd7a.jpg) + +Q: Please segment the person who is holding $\langle \text{vp\_0} \rangle$ . A: [SEG] + +Q: Please segment the tree between $\langle \text{vp}_0 \rangle$ and $\langle \text{vp}_1 \rangle$ . 
A: [SEG] + +![](images/88b02944cee3f7b4a58e0b3ffd50ae70f9295df610d6fcc16d105ed2bf1d5cd1.jpg) +Figure 7. More visualization examples of vision-text referring segmentation from our PerBench. + +![](images/cff4a47f203768f0842f1df33e93ff0f70dc45da26e5754a5f9e6cd0db753944.jpg) + +# 9. More Comparison With SOTA Pixel-Grounded MLLM + +We conduct a qualitative comparative analysis with the SOTA pixel-grounded MLLM, Sa2VA [72], and present the visualization results in Fig. 8. We observe that both Pixel-SAIL and Sa2VA achieve excellent results in most cases. However, Sa2VA performs significantly weaker than Pixel-SAIL in certain scenarios, despite utilizing the much more powerful InternVL2.5 [10] compared to our base encoder-free MLLM [8]. In the left examples, Sa2VA performs notably worse than Pixel-SAIL in multi-object segmentation + +tasks. Additionally, in the right example, Sa2VA demonstrates significantly weaker attention to non-core areas of the image, such as edges, compared to Pixel-SAIL, leading to frequent failures in segmenting objects near image boundaries. + +Please segment the two women. + +Please segment all the mans in right side. + +![](images/c6d0d68534eafff67d8719670cfe877faf018304f100380c905e6b410dd5f80d.jpg) +Pixel-SAIL + +![](images/adbf3fb6b7b5c39d5eb61a072abe5dc979e86526a712b7224538858be92e6be9.jpg) +Sa2VA + +Please segment the flower. + +![](images/1670b5aa11ea16ec862993f96f4c48d8d423050174dfe97332d793ccceb7b98f.jpg) +Figure 8. Visualization Comparison of Sa2Va and Pixel-SAIL. + +![](images/aebf36c684ad3f3508655df02db8926f2ddf1297a2aafa83ac26c6da0953d95d.jpg) + +Please segment the person sitting on the floor. 
+ +![](images/86b860bdc87c86e23424a603ebb6acb9c8d1a465e1f1714a657528991691867d.jpg) +Pixel-SAIL + +![](images/4617a9ba01a62e1555c8eda6e19463c940cd456014879986d1c05a09455702ec.jpg) + +![](images/5b4146250237eff7028b009970b91b0f8647976b0af9572a8aa9a1d2312d4f3c.jpg) + +![](images/0b7ffde63325f0b51fb4beebc982298a3c7a6c5d44f1cdeb9a7612bf4dab0654.jpg) \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10465/images/0b7ffde63325f0b51fb4beebc982298a3c7a6c5d44f1cdeb9a7612bf4dab0654.jpg b/data/2025/2504_10xxx/2504.10465/images/0b7ffde63325f0b51fb4beebc982298a3c7a6c5d44f1cdeb9a7612bf4dab0654.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a5a84f5fd320d2dca50ffa0107799cd2eb506c0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/0b7ffde63325f0b51fb4beebc982298a3c7a6c5d44f1cdeb9a7612bf4dab0654.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96e6b66d9d4f87cfcf70d57d1c296d4bf5b307220c6adb24e719d4cfdf8c3c8d +size 15594 diff --git a/data/2025/2504_10xxx/2504.10465/images/1670b5aa11ea16ec862993f96f4c48d8d423050174dfe97332d793ccceb7b98f.jpg b/data/2025/2504_10xxx/2504.10465/images/1670b5aa11ea16ec862993f96f4c48d8d423050174dfe97332d793ccceb7b98f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81096ad2bbc9f91d30977bb05362665de953b4bf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/1670b5aa11ea16ec862993f96f4c48d8d423050174dfe97332d793ccceb7b98f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f05a22030ccc01619fcbd87cf3821979b841a2c1fce3e80f4d80cc50780a996a +size 10470 diff --git a/data/2025/2504_10xxx/2504.10465/images/1748944d684d603bdbb18ef6a58cc5f55b83c868283d1ba3cb803734338aa333.jpg b/data/2025/2504_10xxx/2504.10465/images/1748944d684d603bdbb18ef6a58cc5f55b83c868283d1ba3cb803734338aa333.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b50dae6727c8172190ffe5c2ba0fb049d194b1c3 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10465/images/1748944d684d603bdbb18ef6a58cc5f55b83c868283d1ba3cb803734338aa333.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb581487b6b98605d83531b8aa32c6a231b0fb0f1db75ee24ec733e9e4f9e059 +size 6539 diff --git a/data/2025/2504_10xxx/2504.10465/images/174cae71eb7b8d0f28c535d16a8c49f3cb485fdc7e42599eb5170af5aef711ea.jpg b/data/2025/2504_10xxx/2504.10465/images/174cae71eb7b8d0f28c535d16a8c49f3cb485fdc7e42599eb5170af5aef711ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14f6ce562d3ca5e8ba8f3dd969f3c2a13184f110 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/174cae71eb7b8d0f28c535d16a8c49f3cb485fdc7e42599eb5170af5aef711ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8218024a03041184c33caa2fae1c20c5595e489af94221fbfb821123154677f1 +size 32093 diff --git a/data/2025/2504_10xxx/2504.10465/images/2186d7b541c3ac8b0b1e6f888aabb7d0139e70b68d4899e20fc2e63d36c16d9c.jpg b/data/2025/2504_10xxx/2504.10465/images/2186d7b541c3ac8b0b1e6f888aabb7d0139e70b68d4899e20fc2e63d36c16d9c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2668d018830d035a83e100b39c64b52015ea9d8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/2186d7b541c3ac8b0b1e6f888aabb7d0139e70b68d4899e20fc2e63d36c16d9c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22bba415cc345d03c6addb01cfc2a94e7d2200eb4a81fccbdfc78366561f6270 +size 13095 diff --git a/data/2025/2504_10xxx/2504.10465/images/21d42f32578b84b87baec4d382d3e198b1d4bb07238b436c05ce0aa66b7c57da.jpg b/data/2025/2504_10xxx/2504.10465/images/21d42f32578b84b87baec4d382d3e198b1d4bb07238b436c05ce0aa66b7c57da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68ebd4cb42adc69657b7e0f8c1d8f1a972364985 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/21d42f32578b84b87baec4d382d3e198b1d4bb07238b436c05ce0aa66b7c57da.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7eb31dbfa10a78a40d306c944d6a489f876ebd04761aab54409dabbaa070bc5c +size 7077 diff --git a/data/2025/2504_10xxx/2504.10465/images/2467c73883bd26062ac816e1e140539ec76f0e812af0524f67af05492787cfd0.jpg b/data/2025/2504_10xxx/2504.10465/images/2467c73883bd26062ac816e1e140539ec76f0e812af0524f67af05492787cfd0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ccac09d92f37c3a3d56216b9c7a520197a3b5af --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/2467c73883bd26062ac816e1e140539ec76f0e812af0524f67af05492787cfd0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac01090dcc51a169be82731735c50820087b4232f4972ffa47d93f8034624519 +size 166217 diff --git a/data/2025/2504_10xxx/2504.10465/images/25c68c43191bca0b4140046d5bba17dabfcc96d22a4b3288b17449104cd462b6.jpg b/data/2025/2504_10xxx/2504.10465/images/25c68c43191bca0b4140046d5bba17dabfcc96d22a4b3288b17449104cd462b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e3cacb2ca74fab06e3df4bd441e4c2699da99e9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/25c68c43191bca0b4140046d5bba17dabfcc96d22a4b3288b17449104cd462b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ccb309daadea3b2b1c735330182ea668e56fadd5e88f463c1b6bef190a6cd97 +size 14943 diff --git a/data/2025/2504_10xxx/2504.10465/images/26dbd4c246279b4fc314092ff22dad26f9b64219d0101f942ada95ce6e793161.jpg b/data/2025/2504_10xxx/2504.10465/images/26dbd4c246279b4fc314092ff22dad26f9b64219d0101f942ada95ce6e793161.jpg new file mode 100644 index 0000000000000000000000000000000000000000..617fcfebf67a21faa07429573a430caaa0a3c58e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/26dbd4c246279b4fc314092ff22dad26f9b64219d0101f942ada95ce6e793161.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f6002196cf58b5d8bfb768b2d30912f9187d55626532cd8933b3c0e47cdfbdb +size 7956 diff --git 
a/data/2025/2504_10xxx/2504.10465/images/29ce58b437a5916dea640e00806154a31abae29e90df52edf2d67809c927bd7a.jpg b/data/2025/2504_10xxx/2504.10465/images/29ce58b437a5916dea640e00806154a31abae29e90df52edf2d67809c927bd7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..470d08bcb0e82b4189b0084b1146ddc4988bc076 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/29ce58b437a5916dea640e00806154a31abae29e90df52edf2d67809c927bd7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce21c4d2bbc2183461e854d7c58f36b23e00a9ad03318b4265dc14c6040f0d0c +size 21644 diff --git a/data/2025/2504_10xxx/2504.10465/images/2ad3d40deefda5121871be7727625f1a4a15c5424e7c69bfaeec2b5381446be6.jpg b/data/2025/2504_10xxx/2504.10465/images/2ad3d40deefda5121871be7727625f1a4a15c5424e7c69bfaeec2b5381446be6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..147d96a6b36907391981e981d9bb3ff9c3eaa63e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/2ad3d40deefda5121871be7727625f1a4a15c5424e7c69bfaeec2b5381446be6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:087641881bc42c41ff523244a4165e3366803643c4a9a706f0a17803d90062f1 +size 24082 diff --git a/data/2025/2504_10xxx/2504.10465/images/3a6e35850e62808d80931bfa589b802dcb84f7e252ccaf4415e54f89948ea1b6.jpg b/data/2025/2504_10xxx/2504.10465/images/3a6e35850e62808d80931bfa589b802dcb84f7e252ccaf4415e54f89948ea1b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18708260c016ced5b9f3cd19421ea8d6a043d22f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/3a6e35850e62808d80931bfa589b802dcb84f7e252ccaf4415e54f89948ea1b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64858dce6e85de0eaf186961ceb6629647f8808b1d7789513852336ac59f3af5 +size 16704 diff --git a/data/2025/2504_10xxx/2504.10465/images/455e7389ccd133ebb97f68487917667e4abac0f3041ae3af7a16c8d5e4cc749b.jpg 
b/data/2025/2504_10xxx/2504.10465/images/455e7389ccd133ebb97f68487917667e4abac0f3041ae3af7a16c8d5e4cc749b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3e49e271b5eff5778868574f2c382b01e934f4c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/455e7389ccd133ebb97f68487917667e4abac0f3041ae3af7a16c8d5e4cc749b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa4bf989e6bfdc8a8ffef08de7b97cea0a6376cce24748a63776aa49c9085e7e +size 13433 diff --git a/data/2025/2504_10xxx/2504.10465/images/4617a9ba01a62e1555c8eda6e19463c940cd456014879986d1c05a09455702ec.jpg b/data/2025/2504_10xxx/2504.10465/images/4617a9ba01a62e1555c8eda6e19463c940cd456014879986d1c05a09455702ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9d7d0dcb254fe502256b6d767df6d1e7cec3d50 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/4617a9ba01a62e1555c8eda6e19463c940cd456014879986d1c05a09455702ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2c1f8e40135ba4238517ca13c9aa03250d1e341b813db6f41e6741dad0c5221 +size 15883 diff --git a/data/2025/2504_10xxx/2504.10465/images/4d78f012581df715eb27cab4d9168bf99ced4bfddc463564b571508b9fb66030.jpg b/data/2025/2504_10xxx/2504.10465/images/4d78f012581df715eb27cab4d9168bf99ced4bfddc463564b571508b9fb66030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce2ac85137594c6fa66cd321501e02eeadec6e35 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/4d78f012581df715eb27cab4d9168bf99ced4bfddc463564b571508b9fb66030.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b348e7ff1516f444469cedd7212a2982834ece7272305ae7ffa9b3b63178e5fc +size 5397 diff --git a/data/2025/2504_10xxx/2504.10465/images/4d7c01c3de069a344975deff39094f9b32939af00d3ab7023456d2e4efc96069.jpg b/data/2025/2504_10xxx/2504.10465/images/4d7c01c3de069a344975deff39094f9b32939af00d3ab7023456d2e4efc96069.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..8773f7b1a089a30e77eea3c3510b80eb32e49c21 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/4d7c01c3de069a344975deff39094f9b32939af00d3ab7023456d2e4efc96069.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc936c3304c9af31c6b1e31588ceec39ccf799a9105ef93cb63318050903ed7e +size 111996 diff --git a/data/2025/2504_10xxx/2504.10465/images/51a610628940f7450f2cce02aab7c97bd296dcf6d63c3709ae587ebcc9940c11.jpg b/data/2025/2504_10xxx/2504.10465/images/51a610628940f7450f2cce02aab7c97bd296dcf6d63c3709ae587ebcc9940c11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e45a97b38c9ebe6b72c65193022b506a4065a69 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/51a610628940f7450f2cce02aab7c97bd296dcf6d63c3709ae587ebcc9940c11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7bb06aba01e7b410a05653314d7fd710fa21ee8ddf60301517e30a5f7a29135 +size 2931 diff --git a/data/2025/2504_10xxx/2504.10465/images/52758840947ce05f90ecc252c8b5c5818b4964a9f94be12806665b38888ffaa2.jpg b/data/2025/2504_10xxx/2504.10465/images/52758840947ce05f90ecc252c8b5c5818b4964a9f94be12806665b38888ffaa2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfe13f0bcaff6c7297bf8e8e7000b1c37bab7812 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/52758840947ce05f90ecc252c8b5c5818b4964a9f94be12806665b38888ffaa2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88270930235a44e35e08422e165ecaf7e62d0adb304fadf02a489420466953c0 +size 8290 diff --git a/data/2025/2504_10xxx/2504.10465/images/54f00460d44c59b795de71e7634a25e6d01f0a7ce2b703a11af4760339be0174.jpg b/data/2025/2504_10xxx/2504.10465/images/54f00460d44c59b795de71e7634a25e6d01f0a7ce2b703a11af4760339be0174.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fa14fe75d75459c2775129cb5b121b4eca7db05 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10465/images/54f00460d44c59b795de71e7634a25e6d01f0a7ce2b703a11af4760339be0174.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ddafec6d33c3ea01502206db3ca1af93026f2d16a04d930b95c2b89dc1f322c +size 15977 diff --git a/data/2025/2504_10xxx/2504.10465/images/555602258ffdfb0cda0578a5201c1603ada5da1407bdc869aa0c325320e39050.jpg b/data/2025/2504_10xxx/2504.10465/images/555602258ffdfb0cda0578a5201c1603ada5da1407bdc869aa0c325320e39050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fac087d23af6b6965bcc13a528722b9940a8b883 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/555602258ffdfb0cda0578a5201c1603ada5da1407bdc869aa0c325320e39050.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd12d3b6eea9186a71ba71a9873157684b6731f220225914667406eb69cb32fe +size 3300 diff --git a/data/2025/2504_10xxx/2504.10465/images/576a8003516e688ddfec0f59b4c386ac5993c297f63a6a14bea084ff9ff5f7df.jpg b/data/2025/2504_10xxx/2504.10465/images/576a8003516e688ddfec0f59b4c386ac5993c297f63a6a14bea084ff9ff5f7df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcfa9370f6901864220099bd9f2eb9f101d2104a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/576a8003516e688ddfec0f59b4c386ac5993c297f63a6a14bea084ff9ff5f7df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0efefd5c25d18917651a42a52479871f88592fb2713cc8285b8b562b5d55a468 +size 2792 diff --git a/data/2025/2504_10xxx/2504.10465/images/5a3cb155b0ff4d71d1da04b93abb3ec77b64982962df8f4621fb90b63ba49b49.jpg b/data/2025/2504_10xxx/2504.10465/images/5a3cb155b0ff4d71d1da04b93abb3ec77b64982962df8f4621fb90b63ba49b49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bde160c9940ec7caebb88350a47d837d51af51a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/5a3cb155b0ff4d71d1da04b93abb3ec77b64982962df8f4621fb90b63ba49b49.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7ddc71ddae354a0be77238ce06f7822b9de1f98a17a001055d2072d45b2a6b7c +size 3490 diff --git a/data/2025/2504_10xxx/2504.10465/images/5b4146250237eff7028b009970b91b0f8647976b0af9572a8aa9a1d2312d4f3c.jpg b/data/2025/2504_10xxx/2504.10465/images/5b4146250237eff7028b009970b91b0f8647976b0af9572a8aa9a1d2312d4f3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef0b28381d03470320bb5faca721d1b4baa570a1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/5b4146250237eff7028b009970b91b0f8647976b0af9572a8aa9a1d2312d4f3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90692fecd89a13f557c2a11fa09341fcae1f763ecf0798afc313c080e36318b7 +size 10498 diff --git a/data/2025/2504_10xxx/2504.10465/images/5e29fcd4523ca0d6da3e40f45f1513abc6492464fdbacd397a078097c046b16e.jpg b/data/2025/2504_10xxx/2504.10465/images/5e29fcd4523ca0d6da3e40f45f1513abc6492464fdbacd397a078097c046b16e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d98eff896d3bed43e5af6f1b2b020a38ac34149d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/5e29fcd4523ca0d6da3e40f45f1513abc6492464fdbacd397a078097c046b16e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d10e484d246fd4152b7d960930b2800368152bb042ce6f6538c628fd4436584 +size 3753 diff --git a/data/2025/2504_10xxx/2504.10465/images/5fafcdafefd607096844ca33bce63f26dc79148a4c06e033e87fdba31edc6979.jpg b/data/2025/2504_10xxx/2504.10465/images/5fafcdafefd607096844ca33bce63f26dc79148a4c06e033e87fdba31edc6979.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7761a4f594d9ce5c1198897e318eaf1385acae82 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/5fafcdafefd607096844ca33bce63f26dc79148a4c06e033e87fdba31edc6979.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8e08d0914fb1bc9d12e1efb8ebec4f82d95678f4e8c416c043b2d0bc7b1e1bd +size 12382 diff --git 
a/data/2025/2504_10xxx/2504.10465/images/637e2c6c160d921c8e8eb7a912d02ef64fb5d5f496a09547497acdb12cb96449.jpg b/data/2025/2504_10xxx/2504.10465/images/637e2c6c160d921c8e8eb7a912d02ef64fb5d5f496a09547497acdb12cb96449.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cdf402fe35870b6c568e6ef75835e2250ec98e38 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/637e2c6c160d921c8e8eb7a912d02ef64fb5d5f496a09547497acdb12cb96449.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4abf0dca074e61f90e47e3e17dd8719145c3f14a46a72aa9cb130e93511bf41 +size 20334 diff --git a/data/2025/2504_10xxx/2504.10465/images/7300f72a106255187cf4c86e7b5a5dc72806c38f843ff35d4057d0d97f0a0ca6.jpg b/data/2025/2504_10xxx/2504.10465/images/7300f72a106255187cf4c86e7b5a5dc72806c38f843ff35d4057d0d97f0a0ca6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0f59ebe50f571ec24a481329b8a04df3996954c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/7300f72a106255187cf4c86e7b5a5dc72806c38f843ff35d4057d0d97f0a0ca6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cebfa7c19ec47654f02e03677aae02ed89754fade796a49b45e3242ab5508b4 +size 12836 diff --git a/data/2025/2504_10xxx/2504.10465/images/80c579cddfe2ed559771ad16801a79afb29c49427d171ce49295ed1addfbe13c.jpg b/data/2025/2504_10xxx/2504.10465/images/80c579cddfe2ed559771ad16801a79afb29c49427d171ce49295ed1addfbe13c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b8486acaa11a523b60fcb7e3b1a3c21657e3c824 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/80c579cddfe2ed559771ad16801a79afb29c49427d171ce49295ed1addfbe13c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fc09e19cec5baccf3db991d94c9d82ec910fa064ff7ca96953be11e43a05831 +size 3392 diff --git a/data/2025/2504_10xxx/2504.10465/images/82720d858e972b556426d0d7a59e765573479959ded0e27c4c7c0c3bcaa4ae1e.jpg 
b/data/2025/2504_10xxx/2504.10465/images/82720d858e972b556426d0d7a59e765573479959ded0e27c4c7c0c3bcaa4ae1e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6bba7626bf36cf664e6ee6ad4eacca198db88017 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/82720d858e972b556426d0d7a59e765573479959ded0e27c4c7c0c3bcaa4ae1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdffbed3d4230871d2db0d8dfc24b9b75fb5a6c70128c26d77d0c095fadfc9c6 +size 4142 diff --git a/data/2025/2504_10xxx/2504.10465/images/8514ef98ba120a98f43055533243a21f0110a475a9d3057fb2ce9a607d8d7ff4.jpg b/data/2025/2504_10xxx/2504.10465/images/8514ef98ba120a98f43055533243a21f0110a475a9d3057fb2ce9a607d8d7ff4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ffb0a9a4e2d7be740179e40cb9e954dfa3a5db47 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/8514ef98ba120a98f43055533243a21f0110a475a9d3057fb2ce9a607d8d7ff4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dcf297fc73a031e0633a134b00d9f7d7162171b5f675c61e49af40b37fe87c3 +size 3121 diff --git a/data/2025/2504_10xxx/2504.10465/images/86b860bdc87c86e23424a603ebb6acb9c8d1a465e1f1714a657528991691867d.jpg b/data/2025/2504_10xxx/2504.10465/images/86b860bdc87c86e23424a603ebb6acb9c8d1a465e1f1714a657528991691867d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ab7b8c282ba9b7115c307af54c929c9bfbb66fd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/86b860bdc87c86e23424a603ebb6acb9c8d1a465e1f1714a657528991691867d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1dd70175e129f881d2cc31f73e99fd8e5edade803f74be55c28da501e64dcd5 +size 11072 diff --git a/data/2025/2504_10xxx/2504.10465/images/88b02944cee3f7b4a58e0b3ffd50ae70f9295df610d6fcc16d105ed2bf1d5cd1.jpg b/data/2025/2504_10xxx/2504.10465/images/88b02944cee3f7b4a58e0b3ffd50ae70f9295df610d6fcc16d105ed2bf1d5cd1.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5df672acf3694e93d5ba7bd72c1efdb6e1ee67a5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/88b02944cee3f7b4a58e0b3ffd50ae70f9295df610d6fcc16d105ed2bf1d5cd1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97e44793e10bcf4296bcca438f2562e0e62aad835db79b896c3c39e130858292 +size 24777 diff --git a/data/2025/2504_10xxx/2504.10465/images/8b9e4c68f6ea43f611dad57e5712dd70f8478ec89248fbfbeb3f59bd966df9c9.jpg b/data/2025/2504_10xxx/2504.10465/images/8b9e4c68f6ea43f611dad57e5712dd70f8478ec89248fbfbeb3f59bd966df9c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f693b2ff0ceb3bc18eb39c7a90e3f39cb9319a8c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/8b9e4c68f6ea43f611dad57e5712dd70f8478ec89248fbfbeb3f59bd966df9c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:334603f6228f04bc89694baf85aac4f7e06b52f31ac6f480e702922918fb0ac3 +size 5474 diff --git a/data/2025/2504_10xxx/2504.10465/images/8c0aa32b9248a9481769b0b1ab82aaee0bd5d496ea09b8a99583223e7dc02225.jpg b/data/2025/2504_10xxx/2504.10465/images/8c0aa32b9248a9481769b0b1ab82aaee0bd5d496ea09b8a99583223e7dc02225.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2d99e5d818df4cab54660be98332e7ca58117ee --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/8c0aa32b9248a9481769b0b1ab82aaee0bd5d496ea09b8a99583223e7dc02225.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:959882acb8432c37a4bccc242c81174ad88c9d3416f16fe7e2ab6067e0ccec49 +size 6050 diff --git a/data/2025/2504_10xxx/2504.10465/images/9440ef894fa079e5258e6dfd1b6a1c3ddfd1826198e0db08f174e3db7c0242e4.jpg b/data/2025/2504_10xxx/2504.10465/images/9440ef894fa079e5258e6dfd1b6a1c3ddfd1826198e0db08f174e3db7c0242e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74d1eb2f3635be588084bf7e04300d6337d8d766 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10465/images/9440ef894fa079e5258e6dfd1b6a1c3ddfd1826198e0db08f174e3db7c0242e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b6118f541d363365d18072135e44a062d712f1c225fd5d61cd17f20517c8758 +size 11230 diff --git a/data/2025/2504_10xxx/2504.10465/images/956d0f979008f483a5008e483c6d996e4b1db14407264f0f0477c843be8c3d9a.jpg b/data/2025/2504_10xxx/2504.10465/images/956d0f979008f483a5008e483c6d996e4b1db14407264f0f0477c843be8c3d9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f5e8703b3d956cca9fb8f656c52fcf2d4dce7a9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/956d0f979008f483a5008e483c6d996e4b1db14407264f0f0477c843be8c3d9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e4a59c5747063693bf0f3203d925880463d5fe4489ce727971483974a4d903 +size 1471 diff --git a/data/2025/2504_10xxx/2504.10465/images/9b7217328723a584f890fb9982f3e03057486311027c5ff17f4e48f7003ad32e.jpg b/data/2025/2504_10xxx/2504.10465/images/9b7217328723a584f890fb9982f3e03057486311027c5ff17f4e48f7003ad32e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad3ba5128a5d0ce178f65c175958bb8fd76c508d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/9b7217328723a584f890fb9982f3e03057486311027c5ff17f4e48f7003ad32e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57db883de0b1b9e805043b6ab0d40cafb78ab40736d967604f8836d3bd12a872 +size 4977 diff --git a/data/2025/2504_10xxx/2504.10465/images/a18d58ffa3b9e5997496a5e5cb52fc23eae362b5fdf646c42c3a31b431a2d3c0.jpg b/data/2025/2504_10xxx/2504.10465/images/a18d58ffa3b9e5997496a5e5cb52fc23eae362b5fdf646c42c3a31b431a2d3c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c27a2df33bfa38501b9be340a6b8ed9167b32f6f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/a18d58ffa3b9e5997496a5e5cb52fc23eae362b5fdf646c42c3a31b431a2d3c0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ebcb1ef2d62ad634fac1e7f0bad89f23af7c5708f0f24fd8dde82387d7db0821 +size 20457 diff --git a/data/2025/2504_10xxx/2504.10465/images/a312e96e807c99c7975509f3cc30a8650cb5647a303a336f39244b88fb17a222.jpg b/data/2025/2504_10xxx/2504.10465/images/a312e96e807c99c7975509f3cc30a8650cb5647a303a336f39244b88fb17a222.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d55d8f4b5e250fa7d89046137fb00d2d40e8be92 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/a312e96e807c99c7975509f3cc30a8650cb5647a303a336f39244b88fb17a222.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a427a2763c56d0e6f3fcd286776107f7bc297ee5069ba90795f8045a7958235 +size 7349 diff --git a/data/2025/2504_10xxx/2504.10465/images/aa0956d34ee08556b170855ddc7b77d845501068b701513f9dd4cd5aa1f0b6cb.jpg b/data/2025/2504_10xxx/2504.10465/images/aa0956d34ee08556b170855ddc7b77d845501068b701513f9dd4cd5aa1f0b6cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae27f610f65926002f08e6570da62d481f4ee821 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/aa0956d34ee08556b170855ddc7b77d845501068b701513f9dd4cd5aa1f0b6cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a310ac18bce3540e791c7a33266b2a630285cbd180de8b7cb2d9d00a65db663 +size 2713 diff --git a/data/2025/2504_10xxx/2504.10465/images/aa0c6a39fab68e9f25a60f16d1d7763c70a4ebb3afdb4147b705e13be16b2639.jpg b/data/2025/2504_10xxx/2504.10465/images/aa0c6a39fab68e9f25a60f16d1d7763c70a4ebb3afdb4147b705e13be16b2639.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18fad618b76bad1e3a1c94ce17123503ab8742d8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/aa0c6a39fab68e9f25a60f16d1d7763c70a4ebb3afdb4147b705e13be16b2639.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da62300d7302954d1f4349666350b138654b58a39e69f86b07967771a896ff80 +size 12892 diff --git 
a/data/2025/2504_10xxx/2504.10465/images/adbf3fb6b7b5c39d5eb61a072abe5dc979e86526a712b7224538858be92e6be9.jpg b/data/2025/2504_10xxx/2504.10465/images/adbf3fb6b7b5c39d5eb61a072abe5dc979e86526a712b7224538858be92e6be9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..885c12b0fc0ff17cd947eb5dce98ac63ceba9943 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/adbf3fb6b7b5c39d5eb61a072abe5dc979e86526a712b7224538858be92e6be9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:836cce6880a573196e06108debd1c5fefc75d6f7c484949879f35df6d719d275 +size 15481 diff --git a/data/2025/2504_10xxx/2504.10465/images/aebf36c684ad3f3508655df02db8926f2ddf1297a2aafa83ac26c6da0953d95d.jpg b/data/2025/2504_10xxx/2504.10465/images/aebf36c684ad3f3508655df02db8926f2ddf1297a2aafa83ac26c6da0953d95d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb2cabbece1651b574f0f807d244163e917c1dff --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/aebf36c684ad3f3508655df02db8926f2ddf1297a2aafa83ac26c6da0953d95d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62cc1415dd7b3bc4c0618988fb49c5cce10efdfa41562559f1d785dab8126ba6 +size 14402 diff --git a/data/2025/2504_10xxx/2504.10465/images/b1cbbc683726f798824e46d98003ddc8d8e08482742e724218e33775a000ce62.jpg b/data/2025/2504_10xxx/2504.10465/images/b1cbbc683726f798824e46d98003ddc8d8e08482742e724218e33775a000ce62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7431d8d1a6a155a9f61323d7b450c68bdbacd770 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/b1cbbc683726f798824e46d98003ddc8d8e08482742e724218e33775a000ce62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9170bf238ff51ac5f61f6ecc6c30e9cf656b85c328fedba2d3ac1bc114bac00a +size 4212 diff --git a/data/2025/2504_10xxx/2504.10465/images/b3cba3595b4d3ca756e3999aab583c70696f7b48d9f22af4691894b857494ece.jpg 
b/data/2025/2504_10xxx/2504.10465/images/b3cba3595b4d3ca756e3999aab583c70696f7b48d9f22af4691894b857494ece.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d26ddeb2a1a81d1b7ed43af62a5b69663d7de6cb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/b3cba3595b4d3ca756e3999aab583c70696f7b48d9f22af4691894b857494ece.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6829619a9d0ea5fe67725b9424c07a0248cb5a4b5049ec44ca07255df24ad2e +size 14830 diff --git a/data/2025/2504_10xxx/2504.10465/images/b98e83ba9dea39282a0c2ade0a8b2df0c60b7033fa236a79e7e6f9ddabb69a51.jpg b/data/2025/2504_10xxx/2504.10465/images/b98e83ba9dea39282a0c2ade0a8b2df0c60b7033fa236a79e7e6f9ddabb69a51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10a0ad1cfd74f33ac33141cbfc8f212d18bc2cb9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/b98e83ba9dea39282a0c2ade0a8b2df0c60b7033fa236a79e7e6f9ddabb69a51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:835546d10dac1b6ab1f4638ef1c18f332ff6cc5546a5fff678d68367785ce346 +size 32852 diff --git a/data/2025/2504_10xxx/2504.10465/images/be2b66fed29fc8e20e40409c20d73777c16880d53864ef80c8dc867c5ad40a35.jpg b/data/2025/2504_10xxx/2504.10465/images/be2b66fed29fc8e20e40409c20d73777c16880d53864ef80c8dc867c5ad40a35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3f6bf221d9478b87636bdadd1628f53220ec208 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/be2b66fed29fc8e20e40409c20d73777c16880d53864ef80c8dc867c5ad40a35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:484d4dc1edb8335a0865fb9d4f71dd20697bd831acf3baf8dc3bb9cf5f781e94 +size 15040 diff --git a/data/2025/2504_10xxx/2504.10465/images/bf21200e51b1a51097e0d3cc0511c985b0448f7206b378f04cc6aff1759b35af.jpg b/data/2025/2504_10xxx/2504.10465/images/bf21200e51b1a51097e0d3cc0511c985b0448f7206b378f04cc6aff1759b35af.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..f67f157721f317299bfb70af8183585ff6d1a4fc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/bf21200e51b1a51097e0d3cc0511c985b0448f7206b378f04cc6aff1759b35af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c84bd0c514c993c6c29ef57cde1d622d3a481332c59f1d8ed821060b2701874b +size 2450 diff --git a/data/2025/2504_10xxx/2504.10465/images/c68060ea861a6cbea6396bbf326c799ca8df216d62e6c88c6bcb08edd708f8c3.jpg b/data/2025/2504_10xxx/2504.10465/images/c68060ea861a6cbea6396bbf326c799ca8df216d62e6c88c6bcb08edd708f8c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5454fb1e05c122cddb34f08dee368a62ec9bb6ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/c68060ea861a6cbea6396bbf326c799ca8df216d62e6c88c6bcb08edd708f8c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0daff62439896c2fd295a190bf6535139186d99be487a276ece840eb55f81166 +size 14232 diff --git a/data/2025/2504_10xxx/2504.10465/images/c6d0d68534eafff67d8719670cfe877faf018304f100380c905e6b410dd5f80d.jpg b/data/2025/2504_10xxx/2504.10465/images/c6d0d68534eafff67d8719670cfe877faf018304f100380c905e6b410dd5f80d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3b803b6774f345d3b3307cb996d494c42c7f3f9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/c6d0d68534eafff67d8719670cfe877faf018304f100380c905e6b410dd5f80d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:862bdcdd6989c79cdecdc9cf24b4eb236637520db94fac25352889ee1acedec1 +size 11224 diff --git a/data/2025/2504_10xxx/2504.10465/images/c9e0341d3155c0159d45850ef3883c1f855c59deeb7dcf71f646e5a702932d8a.jpg b/data/2025/2504_10xxx/2504.10465/images/c9e0341d3155c0159d45850ef3883c1f855c59deeb7dcf71f646e5a702932d8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..704470958e315429c0f9f19fea95e3eb52a92d20 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10465/images/c9e0341d3155c0159d45850ef3883c1f855c59deeb7dcf71f646e5a702932d8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d8c8bc319005e8cfa24b88b2696fef6f6b5b68fd219dcf4bc6daf0edac7a637 +size 7098 diff --git a/data/2025/2504_10xxx/2504.10465/images/cc0595ff87d12b9ef8be15175aa560c9ef16a01b5c5aff4e30984f3ad3cd312f.jpg b/data/2025/2504_10xxx/2504.10465/images/cc0595ff87d12b9ef8be15175aa560c9ef16a01b5c5aff4e30984f3ad3cd312f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cecb86c8a09b1d7cd01f3d76433a916eb39ca496 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/cc0595ff87d12b9ef8be15175aa560c9ef16a01b5c5aff4e30984f3ad3cd312f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fde7d4721312e36756e84cb34200588c9411e5db4746b5d6bddf0fc573e08218 +size 19159 diff --git a/data/2025/2504_10xxx/2504.10465/images/cec28f82f78a1ff66461359b6cf56928057d0bb8f10e5c03c2af24f3188d7bfc.jpg b/data/2025/2504_10xxx/2504.10465/images/cec28f82f78a1ff66461359b6cf56928057d0bb8f10e5c03c2af24f3188d7bfc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd1f9f605d271ef44491a4749842ac402f9f0527 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/cec28f82f78a1ff66461359b6cf56928057d0bb8f10e5c03c2af24f3188d7bfc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fb852241c91f2781ebe97395caa31f7b845f96c5a79a2761c92241baaa959e7 +size 2441 diff --git a/data/2025/2504_10xxx/2504.10465/images/cff4a47f203768f0842f1df33e93ff0f70dc45da26e5754a5f9e6cd0db753944.jpg b/data/2025/2504_10xxx/2504.10465/images/cff4a47f203768f0842f1df33e93ff0f70dc45da26e5754a5f9e6cd0db753944.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bf87492018e64c013414f5e1c2d288aba42c937 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/cff4a47f203768f0842f1df33e93ff0f70dc45da26e5754a5f9e6cd0db753944.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6c8eb55f6821efb71fc3a18537fc488a68e35c455bb972d5d3065c3917400c9b +size 32224 diff --git a/data/2025/2504_10xxx/2504.10465/images/d6b641908f41c0263a174252592424901ed71c8c03ba38b6916ca8c12726c9d7.jpg b/data/2025/2504_10xxx/2504.10465/images/d6b641908f41c0263a174252592424901ed71c8c03ba38b6916ca8c12726c9d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ab461422d977b9e0ba4a11d02c313f66531e20a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/d6b641908f41c0263a174252592424901ed71c8c03ba38b6916ca8c12726c9d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7ebaf6855109987db12009cda757e5a7e558340212a783d95d591809ddd501f +size 18377 diff --git a/data/2025/2504_10xxx/2504.10465/images/ddd809653d8358f3fa9d942f2ed362b8eb1d49d2406959486c808b219fa5bd1b.jpg b/data/2025/2504_10xxx/2504.10465/images/ddd809653d8358f3fa9d942f2ed362b8eb1d49d2406959486c808b219fa5bd1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7411328ed0b32fe7c1cf0ec108d01abeda0b2b2b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/ddd809653d8358f3fa9d942f2ed362b8eb1d49d2406959486c808b219fa5bd1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5aeeae127fbdd87f46d2ffeab842ecdab3d594e781f902213cc1aae58ae1e7e6 +size 6626 diff --git a/data/2025/2504_10xxx/2504.10465/images/e0390f905118034e33c14150b2e2f12717344bd0783dc4626314774a650f9b4d.jpg b/data/2025/2504_10xxx/2504.10465/images/e0390f905118034e33c14150b2e2f12717344bd0783dc4626314774a650f9b4d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4eeb8cd0f124596b420e4571c5c8769fdc031116 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/e0390f905118034e33c14150b2e2f12717344bd0783dc4626314774a650f9b4d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1a9b42cb51aecd9930c1ff333f30f1bd53dff77caea3f45cf8cdd53f447ce6e +size 5774 diff --git 
a/data/2025/2504_10xxx/2504.10465/images/ec4c320c8bb602c970cf6d5f5b650ee874030055777473a9e2cfe4a35408d8f6.jpg b/data/2025/2504_10xxx/2504.10465/images/ec4c320c8bb602c970cf6d5f5b650ee874030055777473a9e2cfe4a35408d8f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7db11e0c752014285fc7ad9df1e477032144b9cf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/ec4c320c8bb602c970cf6d5f5b650ee874030055777473a9e2cfe4a35408d8f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3f3a7e55727fe91bb1f25ad29ddce3a70c2c50af551d07337d447bc62cec41d +size 5204 diff --git a/data/2025/2504_10xxx/2504.10465/images/efec56ff4c133b921837f8320aa383f2ce9a78bb129bdb23c2b4f1b278cce84e.jpg b/data/2025/2504_10xxx/2504.10465/images/efec56ff4c133b921837f8320aa383f2ce9a78bb129bdb23c2b4f1b278cce84e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea4529d58f6d2027e931d7fedee0ca7c5cdef6cb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/efec56ff4c133b921837f8320aa383f2ce9a78bb129bdb23c2b4f1b278cce84e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88cb70ab93c22e1acc40db583d72630fc71a84ff407fa8e2957c8b9c880713f8 +size 8978 diff --git a/data/2025/2504_10xxx/2504.10465/images/f42004728503f31551f10e09916551225730c7ae2579b188ed67c1f60bb2c337.jpg b/data/2025/2504_10xxx/2504.10465/images/f42004728503f31551f10e09916551225730c7ae2579b188ed67c1f60bb2c337.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5830daa94242dbf8a9ef36c234b2d54833091f24 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/f42004728503f31551f10e09916551225730c7ae2579b188ed67c1f60bb2c337.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90d4bd899aef8b05b3c4123fe231de90dd1df0034661ce9dd7a91fe7443ca81c +size 7804 diff --git a/data/2025/2504_10xxx/2504.10465/images/fb4f0cc86bc8fa166e3d2d93309b4d2f49a2774966f93cbc596b8fc90285bfc2.jpg 
b/data/2025/2504_10xxx/2504.10465/images/fb4f0cc86bc8fa166e3d2d93309b4d2f49a2774966f93cbc596b8fc90285bfc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8133a11c34c4bfa3c12e33d634eaa254e38d2e81 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/fb4f0cc86bc8fa166e3d2d93309b4d2f49a2774966f93cbc596b8fc90285bfc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e68f235e90ef87724107eee4b156fb280a42d5386f9de60a99e1c1e3b9665c79 +size 5361 diff --git a/data/2025/2504_10xxx/2504.10465/images/fbef5dad172896c2b1b5166bd4ab0c83cd1af418d9f92e034ba690b473c1a42b.jpg b/data/2025/2504_10xxx/2504.10465/images/fbef5dad172896c2b1b5166bd4ab0c83cd1af418d9f92e034ba690b473c1a42b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43cf8aad2f1c82d209bd8f2fe72f4a86a7bf2449 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/images/fbef5dad172896c2b1b5166bd4ab0c83cd1af418d9f92e034ba690b473c1a42b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a55fc7cae6fc138a5a1f981073679fa2e73e2c1402a05f598fdbdfc7a821b11 +size 115041 diff --git a/data/2025/2504_10xxx/2504.10465/layout.json b/data/2025/2504_10xxx/2504.10465/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b79b7a821158bca9da16fcb47b1be93bf4237ab0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10465/layout.json @@ -0,0 +1,12163 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 94, + 103, + 515, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 103, + 515, + 121 + ], + "spans": [ + { + "bbox": [ + 94, + 103, + 515, + 121 + ], + "type": "text", + "content": "Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "spans": [ + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": 
"text", + "content": "Tao Zhang" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Xiangtai Li" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Zilong Huang" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Yanwei Li" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Weixian Lei" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Xueqing Deng" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Shihao Chen" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Shunping Ji" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Jiashi Feng" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", + "content": " Bytedance Seed" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 90, + 141, + 510, + 185 + ], + "type": "text", 
+ "content": " WHU" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 134, + 186, + 487, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 186, + 487, + 200 + ], + "spans": [ + { + "bbox": [ + 134, + 186, + 487, + 200 + ], + "type": "text", + "content": "Project Page: https://zhang-tao-whu.github.io/project/pixelsail" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 57, + 224, + 231, + 335 + ], + "blocks": [ + { + "bbox": [ + 57, + 224, + 231, + 335 + ], + "lines": [ + { + "bbox": [ + 57, + 224, + 231, + 335 + ], + "spans": [ + { + "bbox": [ + 57, + 224, + 231, + 335 + ], + "type": "image", + "image_path": "d6b641908f41c0263a174252592424901ed71c8c03ba38b6916ca8c12726c9d7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 76, + 349, + 215, + 358 + ], + "lines": [ + { + "bbox": [ + 76, + 349, + 215, + 358 + ], + "spans": [ + { + "bbox": [ + 76, + 349, + 215, + 358 + ], + "type": "text", + "content": "(a), Multi-modal Fusion with extra decoders" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 250, + 213, + 396, + 346 + ], + "blocks": [ + { + "bbox": [ + 250, + 213, + 396, + 346 + ], + "lines": [ + { + "bbox": [ + 250, + 213, + 396, + 346 + ], + "spans": [ + { + "bbox": [ + 250, + 213, + 396, + 346 + ], + "type": "image", + "image_path": "2ad3d40deefda5121871be7727625f1a4a15c5424e7c69bfaeec2b5381446be6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 265, + 349, + 386, + 359 + ], + "lines": [ + { + "bbox": [ + 265, + 349, + 386, + 359 + ], + "spans": [ + { + "bbox": [ + 265, + 349, + 386, + 359 + ], + "type": "text", + "content": "(b), MLLM with segmentation experts" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 368, + 555, + 402 + ], + "lines": [ + { + "bbox": [ + 54, + 368, + 555, + 
402 + ], + "spans": [ + { + "bbox": [ + 54, + 368, + 555, + 402 + ], + "type": "text", + "content": "Figure 1. Comparison of current MLLMs for pixel-wise understanding with our method. (a) and (b). Current MLLMs for pixel-wise understanding feature highly complex system architectures, including an LLM, a CLIP-like vision backbone, an object token extraction model, a segmentation vision backbone, and a SAM-like decoder. (c). Our method employs only a single transformer." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 420, + 225, + 524, + 334 + ], + "blocks": [ + { + "bbox": [ + 420, + 225, + 524, + 334 + ], + "lines": [ + { + "bbox": [ + 420, + 225, + 524, + 334 + ], + "spans": [ + { + "bbox": [ + 420, + 225, + 524, + 334 + ], + "type": "image", + "image_path": "efec56ff4c133b921837f8320aa383f2ce9a78bb129bdb23c2b4f1b278cce84e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 414, + 349, + 550, + 359 + ], + "lines": [ + { + "bbox": [ + 414, + 349, + 550, + 359 + ], + "spans": [ + { + "bbox": [ + 414, + 349, + 550, + 359 + ], + "type": "text", + "content": "(c), Pixel SAIL with one single transformer" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 151, + 413, + 200, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 413, + 200, + 425 + ], + "spans": [ + { + "bbox": [ + 151, + 413, + 200, + 425 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 54, + 439, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 439, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 439, + 296, + 715 + ], + "type": "text", + "content": "Multimodal Large Language Models (MLLMs) achieve remarkable performance for fine-grained pixel-level understanding tasks. 
However, all the works rely heavily on extra components, such as vision encoder (CLIP), segmentation experts, leading to high system complexity and limiting model scaling. In this work, our goal is to explore a highly simplified MLLM without introducing extra components. Our work is motivated by the recent works on Single trTransformer as a unified vVision-Language Model (SAIL) design, where these works jointly learn vision tokens and text tokens in transformers. We present Pixel-SAIL, a single transformer for pixel-wise MLLM tasks. In particular, we present three technical improvements on the plain baseline. First, we design a learnable upsampling module to refine visual token features. Secondly, we propose a novel visual prompt injection strategy to enable the single transformer to understand visual prompt inputs and benefit from the early fusion of visual prompt embeddings and vision tokens. Thirdly, we introduce a vision expert distillation strategy to efficiently enhance the single transformer's fine-grained feature extraction capability. In addition, we have collected a comprehensive pixel understanding benchmark (PerBench), using a manual check. It includes three tasks:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 414, + 555, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 414, + 555, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 414, + 555, + 510 + ], + "type": "text", + "content": "detailed object description, visual prompt-based question answering, and visual-text referring segmentation. Extensive experiments on four referring segmentation benchmarks, one visual prompt benchmark, and our PerBench show that our Pixel-SAIL achieves comparable or even better results with a much simpler pipeline. Code and model will be released at https://github.com/magicresearch/Sa2VA." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 515, + 394, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 515, + 394, + 526 + ], + "spans": [ + { + "bbox": [ + 314, + 515, + 394, + 526 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 535, + 555, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 535, + 555, + 643 + ], + "spans": [ + { + "bbox": [ + 312, + 535, + 555, + 643 + ], + "type": "text", + "content": "Multi-modal Large Language Models (MLLMs) have garnered significant research efforts, driven by advancements of Large Language Models (LLMs) [22, 56, 65]. While most studies focus on open-ended visual question answering tasks, there is a growing interest [51, 80] in fine-grained, pixel-level understanding. This enables broader applications, such as facilitating precise region-level editing and generation and achieving precise understanding of designated mask regions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 643, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 643, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 643, + 556, + 715 + ], + "type": "text", + "content": "Recent pixel-wise MLLMs [27, 51, 54, 63, 72, 80, 81] mainly adopt visual and language fusion frameworks, following design patterns [17, 42, 68] established before the LLM era. For example, LAVIT [68] adopts encoder-fusion approach, injecting language embedding (generated by BERT [13]) into vision transformers. 
With the advent of" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 55, + 70, + 195, + 87 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 70, + 195, + 87 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 195, + 87 + ], + "type": "text", + "content": "ByteDance | Seed" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 207, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 207, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 207, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.10465v1 [cs.CV] 14 Apr 2025" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 90, + 294, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 90, + 294, + 209 + ], + "spans": [ + { + "bbox": [ + 55, + 90, + 294, + 209 + ], + "type": "text", + "content": "LLMs [22, 65, 66], recent works [27, 54, 72, 80] integrate state-of-the-art segmentation models [26, 33, 53], for pixel-level understanding, by either appending them to LLM outputs or embedding LLM within segmentation pipelines. While effective, the overall architectures are complex, requiring specialized components such as vision-language fusion modules and additional decoders. Moreover, their final performance often heavily depends on either MLLMs or the segmentation models, which may lead to suboptimal results due to limitations within individual submodules." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 212, + 294, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 212, + 294, + 368 + ], + "spans": [ + { + "bbox": [ + 55, + 212, + 294, + 368 + ], + "type": "text", + "content": "In this work, we explore a novel, simple yet effective pixel-wise MLLM design, drawing inspiration from recent advancements in SAIL architecture, which is also called Encoder-free MLLMs. These methods drop the extra vision encoder and jointly co-train vision and language tokens on large scale datasets, with a simpler design. Moreover, they show competitive performance on image-level VQA tasks, compared with LLaVA. Motivated by this success, we extend the framework to pixel-level understanding tasks, aiming to reduce the complexity of existing approaches. To the best of our knowledge, this is the first study to explore the simplest architecture for pixel-wise MLLM tasks, including referring segmentation and visual prompt understanding." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 369, + 294, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 369, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 55, + 369, + 294, + 525 + ], + "type": "text", + "content": "We first directly extend SAIL architecture by adding segmentation token and visual prompt tokens to generate segmentation masks and output region caption, following previous works [27, 51, 74]. However, this leads to inferior results on both segmentation and visual prompt understanding. Several reasons are: (1), The misalignments on high resolution features since there are no segmentation decoders since SAIL directly reshape the vision tokens into features. (2), Previous works directly adopt mask pooling on high level visual tokens where SAIL baseline only maps RGB inputs with one projection layer, where most tokens are low level features. (3), The mask quality is low since no segmentation experts are involved." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 527, + 294, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 527, + 294, + 706 + ], + "spans": [ + { + "bbox": [ + 55, + 527, + 294, + 706 + ], + "type": "text", + "content": "To solve these problems, we present three simple technical improvements, which lead to our Pixel-SAIL framework. First, we design a simple learnable up-sampling module to refine the low resolution visual tokens in high resolution features. Our goal is to keep the design as simple as possible, where only one transposed 2D convolution is involved. Then, for visual prompt understanding, we design a novel visual prompt injection method, where we map the visual prompts into special text tokens without introducing extra visual prompt encoder in the middle stage of SAIL. Next, we propose to distill the previous segmentation experts into SAIL to improve mask quality. All the improvements are plug-in-play, and we verify the effectiveness on various SAIL architectures, including SOLO [8] and EVEv2 [16]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 708, + 294, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 708, + 294, + 731 + ], + "spans": [ + { + "bbox": [ + 55, + 708, + 294, + 731 + ], + "type": "text", + "content": "Then, to further indicate the effectiveness of our Pixel-SAIL and facilitate the development of pixel-LLM com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 91, + 553, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 553, + 246 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 553, + 246 + ], + "type": "text", + "content": "munity, we further design a new challenging benchmark, PerBench. Compared with previous pixel-wise MLLM benchmarks, we have three innovative and challenging features. 
First, we include a detailed object caption where most existing benchmarks only contain short captions without fine-gained contents. Secondly, we re-evaluate visual-prompt understanding as multi-choice VQA tasks following MME [20] and MMBench [43] to achieve more accurate region caption evaluation. Thirdly, we introduce a task by segmenting objects jointly referenced by visual prompts and text. Our benchmark reveals the limitation of current state-of-the-art pixel-wise MLLM on fine-grained understanding and mixed referring tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 247, + 553, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 247, + 553, + 390 + ], + "spans": [ + { + "bbox": [ + 313, + 247, + 553, + 390 + ], + "type": "text", + "content": "Pixel-SAIL is jointly co-trained with mixed data engine on referring segmentation datasets, VQA datasets, and visual prompt datasets. Experimental results show that our method can achieve better results on five pixel-wise benchmarks. In particular, on RefCOCOg and RefCOCO+ datasets, our method with 3B size can outperform previous pixel MLLMs, including GLaMM (7B) and OMG-LLaVA (7B), by " + }, + { + "bbox": [ + 313, + 247, + 553, + 390 + ], + "type": "inline_equation", + "content": "1.5 - 3.0\\%" + }, + { + "bbox": [ + 313, + 247, + 553, + 390 + ], + "type": "text", + "content": " with a simpler pipeline. On our Per-Bench, our method achieves 24.2 METEOR, " + }, + { + "bbox": [ + 313, + 247, + 553, + 390 + ], + "type": "inline_equation", + "content": "74\\%" + }, + { + "bbox": [ + 313, + 247, + 553, + 390 + ], + "type": "text", + "content": " accuracy, 33.4 cIoU and 42.2 overall score, surpassing the SOTA MLLMs GLaMM (7B) and Sa2VA (4B) with overall scores of 26.9 and 3.2, respectively." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 401, + 400, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 401, + 400, + 414 + ], + "spans": [ + { + "bbox": [ + 314, + 401, + 400, + 414 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 421, + 553, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 421, + 553, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 421, + 553, + 732 + ], + "type": "text", + "content": "Large Vision Language Models. Staring from CLIP [50] and ALIGN [24], modern vision language models have adopted contrastive learning on large-scale image-text datasets for learning vision-text aligned representations. The trained models are also proven to work well on open-vocabulary perception, such as segmentation [45, 71, 78, 79] and detection [21, 58, 61, 75]. The following works [31, 32, 64, 76] share the same network design, exploring modified loss functions and targeting data quality and filtering. Then, with the rise of large language models [5, 22, 56, 65], recent works [1, 10, 11, 40, 55, 77] mainly focus on multimodal large language models for open-ended settings, such as visual question answering or OCR benchmarks. On representative work, LLaVA [40], uses the CLIP to encode images into visual tokens and sends the visual tokens to LLMs. After that, the following works [1, 30, 41] improve designs with scaled high quality datasets, images, and videos constraining. Meanwhile, several recent works [8, 14, 16, 46] also explore the visual encoder-free designs, which jointly learn the image and text representation in a single transformer architecture. For example, SOLO [8] collects mixed language and vision datasets and trains one transformer for VQA tasks, while EVE [14] designs a CLIP supervision to enhance visual token learning. 
Our work follows the visual encoder-free design, and we go a step further by exploring pixel-grounded understanding tasks, including ground" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 91, + 294, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 294, + 126 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 294, + 126 + ], + "type": "text", + "content": "ing tasks and visual prompt understanding. To our knowledge, we are the first to apply encoder-free architecture for pixel-grounded understanding tasks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 128, + 295, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 128, + 295, + 330 + ], + "spans": [ + { + "bbox": [ + 55, + 128, + 295, + 330 + ], + "type": "text", + "content": "Referring Expression Segmentation. This task outputs specific masks driven by text description. Earlier works [19, 23, 36, 39, 67] explore various fusion architecture and modules to enhance text and vision feature alignments. Equipped with LLMs, several recent advanced works [27, 48, 49, 51, 63, 72, 73, 80, 82] propose more complex referring tasks, including reasoning referring or joint mask and caption generation. In particular, LISA [27] involves complex expression while GLaMM [51] annotates a new dataset and proposes region-level caption and segmentation tasks. However, all these works contain complex designs: extra vision encoders, segmentation encoders, mask decoders, and prompt encoders. 
Our method, Pixel-SAIL, only has one transformer to jointly learn the joint visual and language feature. With proposed data engine and improved methods, Pixel-SAIL achieves good results with much simpler architecture." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 331, + 295, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 331, + 295, + 464 + ], + "spans": [ + { + "bbox": [ + 55, + 331, + 295, + 464 + ], + "type": "text", + "content": "Visual Prompt Understanding. Understanding visual prompts plays an important role when building interaction between VLMs and human. Recent works [4, 38, 47, 51, 74] build new visual prompt datasets for region caption generation and prompt-aware VQA tasks. ViP-LLaVA [4] overlays the visual prompts directly onto the image canvas and fine-tunes the LLaV on a specific visual prompt dataset, while Osprey [74] explores pixel-wise mask regions into language instructions. Our method can also be extended into visual prompt understanding with our proposed prompt token injection design." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 477, + 111, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 477, + 111, + 489 + ], + "spans": [ + { + "bbox": [ + 55, + 477, + 111, + 489 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 497, + 269, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 497, + 269, + 510 + ], + "spans": [ + { + "bbox": [ + 55, + 497, + 269, + 510 + ], + "type": "text", + "content": "3.1. 
Encoder Free MLLM and Plain Baseline" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 515, + 295, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 515, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 515, + 295, + 624 + ], + "type": "text", + "content": "Recently, several encoder-free MLLMs [8, 15, 16, 46] achieve comparable performance with those extra vision encoders. These models jointly learn vision and text features in a single transformer, with much simpler architecture. In particular, SOLO uses a simple project layer to map the image into visual tokens and then combines language tokens as the inputs of the transformer. However, no works have explored such new architecture for fine-grained vision language tasks (region caption, referring masks)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "content": "Plain Baseline. To fill this gap, we first construct a plain single transformer baseline, motivated by the previous ViT-based MLLMs [27, 72]. We start it with a pre-trained encoder-free MLLM. For segmentation tasks, we modify previous mask generation methods into the single transformer. First, we reshape the hidden states of the last transformer layer of vision tokens " + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{V} \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "content": " into image features " + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{F} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}" + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "content": " represents the number of vision tokens, " + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "content": " denotes the channel size, " + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 55, + 624, + 295, + 732 + ], + "type": "text", + "content": " indicate" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": "the height and width of the image, " + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": " stands for the down-sampling stride. Then, the image features are then crossmultiplied with the hidden states of the predicted segmentation token " + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "\\mathcal{Q} \\in \\mathbb{R}^{K \\times C}" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": " to generate the segmentation masks " + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\in \\mathbb{R}^{K \\times \\frac{H}{S} \\times \\frac{W}{S}}" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": " signifies the number of predicted segmentation tokens, following previous works [27, 51]. For visual prompt understanding, we employ a pooling-based method [74] to derive object representations " + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "\\mathcal{O} \\in \\mathbb{R}^{M \\times C}" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": " from image patch embeddings " + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "\\mathcal{P} \\in \\mathbb{R}^{\\frac{H}{P} \\times \\frac{W}{P} \\times C}" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": ". These object embeddings are fed into the single transformer to represent the corresponding objects. " + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": " represents the number of visual prompts, and " + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 313, + 91, + 553, + 270 + ], + "type": "text", + "content": " denotes the patch size. For segmentation tasks, we adopt extra mask loss. Otherwise, we adopt the same text loss for VQA tasks and visual prompt understanding tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 271, + 555, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 271, + 555, + 415 + ], + "spans": [ + { + "bbox": [ + 313, + 271, + 555, + 415 + ], + "type": "text", + "content": "Limitation. 
The plain baseline demonstrates a certain level of pixel-text alignment capability since both segmentation token and visual prompt token are jointly learned with vision and language tokens. However, the plain baseline exhibits several significant shortcomings: 1) The segmentation mask quality is poor due to the large feature down-sampling stride (16 or 32), even when using simple pixel shuffle or bilinear interpolation for up-sampling. 2) The single transformer struggles to comprehend the referential target of object representation, as the object representation is summarized from image patch embeddings with poor semantic information." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 426, + 429, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 426, + 429, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 429, + 437 + ], + "type": "text", + "content": "3.2. Pixel-SAIL Method" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 444, + 553, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 444, + 553, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 444, + 553, + 635 + ], + "type": "text", + "content": "Given the substantial shortcomings, the performance of plain baseline in fine-grained pixel understanding tasks falls significantly, compared to vision-expert competitors (Sec.4). To solve these challenges, we have implemented three key enhancements to the baseline architecture. First, we integrate a learnable up-sampling module to fully exploit the segmentation capabilities of the single transformer architecture. Second, we develop an innovative visual prompt injection mechanism that facilitates effective interpretation of visual prompt inputs. Our method enables early-stage fusion between vision tokens and visual prompt embeddings. 
Finally, we introduce a dense feature distillation strategy that significantly improves the model's capacity for extracting fine-grained visual features. These improvements collectively address the shortcomings of the plain baseline while maintaining its architectural simplicity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "text", + "content": "Learnable Up-sampling Module. Inspired by [35], we also incorporate a simple learnable up-sampling model " + }, + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "text", + "content": " to generate the high-resolution features " + }, + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "inline_equation", + "content": "F_{h} \\in \\mathbb{R}^{\\frac{H}{4} \\times \\frac{W}{4} \\times C}" + }, + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "text", + "content": " essential for pixel-level grounding. The up-sampling module comprises multiple up-sampling blocks, each consisting of a transposed 2D convolution followed by a depth-wise convolution. 
It effectively upscales the low-resolution features " + }, + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "inline_equation", + "content": "F_{l} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}" + }, + { + "bbox": [ + 313, + 636, + 553, + 732 + ], + "type": "text", + "content": ", derived from resized vision tokens," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 88, + 534, + 255 + ], + "blocks": [ + { + "bbox": [ + 76, + 88, + 534, + 255 + ], + "lines": [ + { + "bbox": [ + 76, + 88, + 534, + 255 + ], + "spans": [ + { + "bbox": [ + 76, + 88, + 534, + 255 + ], + "type": "image", + "image_path": "4d7c01c3de069a344975deff39094f9b32939af00d3ab7023456d2e4efc96069.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 262, + 555, + 297 + ], + "lines": [ + { + "bbox": [ + 54, + 262, + 555, + 297 + ], + "spans": [ + { + "bbox": [ + 54, + 262, + 555, + 297 + ], + "type": "text", + "content": "Figure 2. The architecture of our proposed plain baseline and Pixel-SAIL. Pixel-SAIL is as simple and elegant as the plain baseline but demonstrates significantly improved performance. The examples on the right demonstrate that Pixel-SAIL possesses the capability for general conversation and comprehensive pixel-grounded understanding." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 312, + 218, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 218, + 323 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 218, + 323 + ], + "type": "text", + "content": "to one-quarter of the original resolution." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 326, + 296, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 326, + 296, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 326, + 296, + 422 + ], + "type": "text", + "content": "Visual Prompt Injection. Previous works [51, 72, 74] summarize the referenced object features via pooling on vision tokens from ViT encoder. However, there are no such visual tokens for encoder-free MLLMs. Thus, the inherent semantic deficiency hinders the single transformer's ability to precisely identify referenced objects based solely on feature summaries derived from patch embeddings, where most are low-level cues, such as edges." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "spans": [ + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": "To overcome this limitation, we propose an innovative visual prompt injection mechanism. Our approach integrates multiple visual prompt special tokens " + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "inline_equation", + "content": "\\{VP_{i}|i\\in [1,N]\\}" + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": " into the large language model's vocabulary. 
These tokens' text embeddings " + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{V}\\mathcal{P}^t\\in \\mathbb{R}^{N\\times C}" + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": " are used to fill mask-based visual prompts " + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{M}^{vp}\\in \\mathbb{R}^{N\\times \\frac{H}{P}\\times \\frac{W}{P}}" + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": " , thereby creating visual prompt tokens " + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{V}\\mathcal{P}\\in \\mathbb{R}^{\\frac{HW}{P^2}\\times C}" + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": " . The vision tokens " + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{V}\\in \\mathbb{R}^{\\frac{HW}{P^2}\\times C}" + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": " are first added with these visual prompt tokens " + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{V}\\mathcal{P}" + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": " before being processed by the single transformer. This enhancement enables the model to accurately identify referenced objects by leveraging the corresponding special tokens " + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "inline_equation", + "content": "\\{VP_{i}|i\\in [1,N]\\}" + }, + { + "bbox": [ + 55, + 426, + 296, + 584 + ], + "type": "text", + "content": " within the text instructions." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "type": "text", + "content": "Dense Feature Distillation. Due to the lack of large-scale, high-quality segmentation data like SA-1B [26], the method produces poor-quality masks, particularly at object boundaries. However, directly training on large-scale segmentation datasets would be costly and damage the original instruction following capabilities. To address both, we employ pre-trained segmentation experts to distill the single transformer, ensuring optimization of object details without hurting VQA capabilities. We perform distillation by leveraging mask features generated by Mask2Former's [12] pixel decoder on the upsampled mask features " + }, + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "type": "inline_equation", + "content": "F_{h} \\in \\mathbb{R}_{\\frac{H}{4}}^{\\frac{H}{4} \\times \\frac{W}{4} \\times C}" + }, + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "type": "text", + "content": " and utilizing features produced by SAM2's [53] encoder" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 316, + 309, + 553, + 534 + ], + "blocks": [ + { + "bbox": [ + 316, + 309, + 553, + 534 + ], + "lines": [ + { + "bbox": [ + 316, + 309, + 553, + 534 + ], + "spans": [ + { + "bbox": [ + 316, + 309, + 553, + 534 + ], + "type": "image", + "image_path": "fbef5dad172896c2b1b5166bd4ab0c83cd1af418d9f92e034ba690b473c1a42b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 541, + 555, + 564 + ], + "lines": [ + { + "bbox": [ + 313, + 541, + 555, + 564 + ], + "spans": [ + { + "bbox": [ + 313, + 541, + 555, + 564 + ], + "type": "text", + "content": "Figure 3. Visual examples on our PerBench. Best view it in color and zoom in." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 570, + 555, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 555, + 609 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 555, + 609 + ], + "type": "text", + "content": "on the low-resolution features " + }, + { + "bbox": [ + 313, + 570, + 555, + 609 + ], + "type": "inline_equation", + "content": "F_{l} \\in \\mathbb{R}^{\\frac{H}{S} \\times \\frac{W}{S} \\times C}" + }, + { + "bbox": [ + 313, + 570, + 555, + 609 + ], + "type": "text", + "content": ". This simple distillation strategy improves segmentation quality with only a negligible increase in training time." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 617, + 486, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 486, + 631 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 486, + 631 + ], + "type": "text", + "content": "3.3. Benchmark and Dataset Engine" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 635, + 554, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 635, + 554, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 635, + 554, + 694 + ], + "type": "text", + "content": "Our Benchmark: PerBench. We further manually annotate a benchmark named PerBench (Pixel-grounded Understanding Benchmark). PerBench aims to address three aspects lacking in existing pixel grounding benchmarks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 696, + 554, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 696, + 554, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 696, + 554, + 732 + ], + "type": "text", + "content": "The first aspect is detailed object caption. 
Previous works [6, 34] have emphasized more detailed image captions, demonstrating that comprehensive captions signifi" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 91, + 294, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 91, + 294, + 223 + ], + "spans": [ + { + "bbox": [ + 54, + 91, + 294, + 223 + ], + "type": "text", + "content": "cantly enhance model performance. However, current object caption datasets such as Osprey-724k [74] and evaluation benchmarks like Refcocog provide only cursory object captions. To address this limitation, we leverage SOTA models InternVL2.5-78B [11] and Qwen2.5VL-72B [2] to generate detailed object captions. These detailed object captions are then meticulously screened and refined through manual review, ultimately yielding 500 precise, nuanced object captions to serve as a robust evaluation benchmark. METEOR [3] serves as the evaluation metric for the detailed object caption task." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 224, + 296, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 224, + 296, + 450 + ], + "spans": [ + { + "bbox": [ + 57, + 224, + 296, + 450 + ], + "type": "text", + "content": "The second aspect is the assessment of visual-prompt understanding ability in multiple-choice format. Although captioning tasks can accurately reflect a model's visual prompt understanding ability, precise and fair evaluation is difficult. 
Rule-based metrics such as CIDEr [57] and METEOR [3] are affected by response length, format, and ground-truth quality, while using models as evaluators inevitably introduces model bias. Therefore, a fair and quantitative visual-prompt understanding benchmark is necessary. Inspired by MMBench [43] and MME [20], we manually annotated 500 multiple-choice questions based on detailed object captions, covering the examination of models' understanding of referenced objects' appearance, attributes, uses, and relationships with surrounding objects. MLLMs need to perceive the attributes of referenced objects accurately and have instruction-following ability to select the appropriate choice correctly. Accuracy is selected as the evaluation metric for the visual prompt-based multiple-choice questions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 453, + 295, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 453, + 295, + 619 + ], + "spans": [ + { + "bbox": [ + 55, + 453, + 295, + 619 + ], + "type": "text", + "content": "The third aspect is segmenting objects jointly referenced by visual prompts and text, abbreviated as V-T RES. It aims to test the model's ability to understand objects indicated by user-input visual prompts and segment associated objects according to text instructions. This task comprehensively assesses the MLLM's pixel-grounded understanding ability, requiring the model to possess precise visual prompt understanding capabilities, text reasoning abilities, and pixel grounding skills. We also manually annotate 500 V-T RES samples, which five expert annotators double-check. Similar with RefCOCO series datasets, we select cIoU and gIoU as the evaluation metric for V-T RES task. The overall score of PerBench is the average of the normalized scores (0-100) from the above three tasks." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 621, + 295, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 621, + 295, + 670 + ], + "spans": [ + { + "bbox": [ + 55, + 621, + 295, + 670 + ], + "type": "text", + "content": "Our benchmark can be used to evaluate pixel-wise MLLMs and point out more challenging directions for detailed object understanding, joint visual prompts, and text understanding to the current community." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 670, + 295, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 670, + 295, + 718 + ], + "spans": [ + { + "bbox": [ + 55, + 670, + 295, + 718 + ], + "type": "text", + "content": "Dataset Engine. To fully unleash the potential of the single transformer, we collect diverse pixel-grounded data, including segmentation datasets and visual-prompt understanding datasets, following previous works [16, 46]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 720, + 294, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 720, + 294, + 731 + ], + "spans": [ + { + "bbox": [ + 67, + 720, + 294, + 731 + ], + "type": "text", + "content": "For segmentation-related data, we first use Ref-" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 91, + 553, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 553, + 199 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 553, + 199 + ], + "type": "text", + "content": "COCO+/g [25, 70] and COCO [37] semantic segmentation data used in LISA [27], the Grandf dataset (214k samples) used in GLaMM [51], and MUSE data (246k samples) used in PixelLM [54]. We also use recent Pixel2Cap [69] data (comprising 20k images) and organized it into the referring segmentation format. Finally, we further add COCO [37] panoptic segmentation data and structured it as: \"Question: Please segment the {class name} in instance mode. 
Answer: {class name}-1 [SEG], ..., {class name}-n [SEG].\"" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 199, + 553, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 199, + 553, + 317 + ], + "spans": [ + { + "bbox": [ + 313, + 199, + 553, + 317 + ], + "type": "text", + "content": "For visual prompt understanding, we employ two public datasets: Osprey-724k [74] and Pixel2Cap [69]. Additionally, we reformat the COCO dataset into a question-answer structure specifically designed to query object categories. To enhance the model's capability for fine-grained object description, we prompt the InternVL2.5-78B [11] model to generate approximately 300k detailed object captions derived from 10k SA-1B [26] images. Lastly, to maintain the instruction following ability, we also integrate the LLaVA1.5 [40] 665k dataset into our training data." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "spans": [ + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "text", + "content": "Training. We combine all the aforementioned data for cotraining. 
The loss function consists of the next token prediction loss " + }, + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ntp}" + }, + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "text", + "content": ", the segmentation loss " + }, + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{seg}" + }, + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "text", + "content": ", and the distillation loss " + }, + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{distill}" + }, + { + "bbox": [ + 313, + 318, + 553, + 366 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 319, + 372, + 553, + 385 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 372, + 553, + 385 + ], + "spans": [ + { + "bbox": [ + 319, + 372, + 553, + 385 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {n t p} + \\mathcal {L} _ {s e g} + \\alpha \\mathcal {L} _ {\\text {d i s t i l l}}, \\quad \\mathcal {L} _ {s e g} = \\lambda \\mathcal {L} _ {c e} + \\beta \\mathcal {L} _ {\\text {s e g}}, \\tag {1}", + "image_path": "e0390f905118034e33c14150b2e2f12717344bd0783dc4626314774a650f9b4d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "spans": [ + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "type": "text", + "content": " is set to 0.5, " + }, + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + 
"type": "text", + "content": " to 2.0 and " + }, + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 391, + 488, + 403 + ], + "type": "text", + "content": " to 0.5." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 413, + 391, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 413, + 391, + 426 + ], + "spans": [ + { + "bbox": [ + 313, + 413, + 391, + 426 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 433, + 555, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 433, + 555, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 433, + 555, + 732 + ], + "type": "text", + "content": "Implementation Details. We extensively evaluate our meta-architecture using two open-source encoder-free multimodal large language models: SOLO [8] and EVEv2 [16]. For SOLO, following [28], we modify the attention mechanism between vision tokens from causal attention to full attention and conduct supervised fine-tuning on the LLaVA1.5 665k dataset. For SOLO, we modify the attention mechanism between vision tokens from causal attention to full attention and replace the LLM with Qwen2.5 [66] 0.5B and 3B, respectively. For EVEv2, we retain its original architecture and weights without any modifications. We build Pixel-SAIL 0.5B and 3B based on our modified SOLO baseline, and 7B on EVEv2. When training Pixel-SAIL based on SOLO, we maintain the original resolution of input images. For images with a long side exceeding 1024, we preserve the aspect ratio and resize the long side to 1024. 
When training Pixel-SAIL based on EVEv2, we resize the images to the closest to " + }, + { + "bbox": [ + 313, + 433, + 555, + 732 + ], + "type": "inline_equation", + "content": "800^2" + }, + { + "bbox": [ + 313, + 433, + 555, + 732 + ], + "type": "text", + "content": " pixels to reduce training costs, which differs from the original setting of " + }, + { + "bbox": [ + 313, + 433, + 555, + 732 + ], + "type": "inline_equation", + "content": "1600^2" + }, + { + "bbox": [ + 313, + 433, + 555, + 732 + ], + "type": "text", + "content": ". The training process is conducted on 32 A100 (80GB) GPUs using the AdamW [44] optimizer with a cosine decay learning rate scheduler. We set the initial learning rate to 4e-5, the warm-up ratio to 0.03, and the batch size to 256. The training duration for the 0.5B and 3B models is 12 hours and 24 hours, respectively." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 91, + 113, + 517, + 379 + ], + "blocks": [ + { + "bbox": [ + 55, + 89, + 555, + 110 + ], + "lines": [ + { + "bbox": [ + 55, + 89, + 555, + 110 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 555, + 110 + ], + "type": "text", + "content": "Table 1. Performance on referring segmentation benchmarks. The evaluation metric is cIoU. \"ft\" denotes fine-tuning on the specific dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 91, + 113, + 517, + 379 + ], + "lines": [ + { + "bbox": [ + 91, + 113, + 517, + 379 + ], + "spans": [ + { + "bbox": [ + 91, + 113, + 517, + 379 + ], + "type": "table", + "html": "
MethodLLM SizeRefCOCO+RefCOCOgRefCOCOgRefCOCO
valtestAtestBval(U)test(U)valtestAtestBvaltestAtestB
Referring Segmentation Specialist Without MLLM
VLT [17]-56.361.050.155.057.767.570.565.252.562.250.5
CRIS [59]-62.368.153.759.960.470.573.266.155.363.851.0
LAVT [68]-62.168.455.161.262.172.775.868.857.665.355.0
PolyFormer-L [42]-69.374.661.969.270.276.078.373.3---
ReLA [39]-66.071.057.765.066.073.876.570.256.459.058.4
MLLMs With Vision Expert
LISA (ft) [27]7B65.170.858.167.970.674.979.172.3---
PixelLM [54]7B66.371.758.369.370.573.076.568.2---
GSVA (ft) [63]7B64.567.758.671.172.076.477.472.861.769.260.3
GroundHog [81]7B70.575.064.974.174.678.579.975.766.7--
GlaMM (ft) [51]7B72.678.764.674.274.979.583.276.9---
SAM4MLLM [9]7B73.577.865.874.575.679.682.876.166.370.163.2
LaSagnA [60]7B66.470.660.170.671.976.878.773.838.150.442.1
OMG-LLaVA (ft) [80]7B69.173.163.072.972.978.080.374.1---
F-LLM [62]7B65.875.258.570.171.775.879.572.4---
Sa2VA [72]4B74.3--76.7-80.4-----
MLLMs Without Vision Expert
Pixel-SAIL0.5B70.875.865.475.476.777.980.575.963.971.563.6
Pixel-SAIL (ft)0.5B73.077.068.075.676.179.181.777.068.074.066.8
Pixel-SAIL3B75.779.772.078.780.480.882.679.067.774.667.1
Pixel-SAIL (ft)3B76.279.771.278.579.481.883.478.872.177.170.4
", + "image_path": "2467c73883bd26062ac816e1e140539ec76f0e812af0524f67af05492787cfd0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 61, + 403, + 288, + 436 + ], + "blocks": [ + { + "bbox": [ + 58, + 389, + 293, + 400 + ], + "lines": [ + { + "bbox": [ + 58, + 389, + 293, + 400 + ], + "spans": [ + { + "bbox": [ + 58, + 389, + 293, + 400 + ], + "type": "text", + "content": "Table 2. Region caption performance on RefCOCOg dataset." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 403, + 288, + 436 + ], + "lines": [ + { + "bbox": [ + 61, + 403, + 288, + 436 + ], + "spans": [ + { + "bbox": [ + 61, + 403, + 288, + 436 + ], + "type": "table", + "html": "
Method SizePixel-SAIL 0.5BPixel-SAIL 3BSa2VA 4BOMG-LLaVA 7BOsprey 7BGLaMM 7B
METEOR16.017.617.315.316.616.2
", + "image_path": "be2b66fed29fc8e20e40409c20d73777c16880d53864ef80c8dc867c5ad40a35.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 61, + 475, + 288, + 557 + ], + "blocks": [ + { + "bbox": [ + 55, + 441, + 295, + 474 + ], + "lines": [ + { + "bbox": [ + 55, + 441, + 295, + 474 + ], + "spans": [ + { + "bbox": [ + 55, + 441, + 295, + 474 + ], + "type": "text", + "content": "Table 3. The performance on our PerBench. Due to the lack of visual prompt understanding capability, LISA scores 0 on all tasks." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 475, + 288, + 557 + ], + "lines": [ + { + "bbox": [ + 61, + 475, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 61, + 475, + 288, + 557 + ], + "type": "table", + "html": "
ModelSizeDetailed Caption METEORMCQ AccV-T RESOverall Score
cIoUgIoU
LISA [27]7B00000
Osprey [74]7B13.40.12008.5
GLaMM [51]7B12.60.1424.314.615.3
Sa2VA [72]4B19.20.7131.921.939.0
Pixel-SAIL0.5B21.40.6929.719.838.4
Pixel-SAIL3B24.20.7433.423.542.2
", + "image_path": "174cae71eb7b8d0f28c535d16a8c49f3cb485fdc7e42599eb5170af5aef711ea.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 61, + 597, + 288, + 671 + ], + "blocks": [ + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "lines": [ + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "spans": [ + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "type": "text", + "content": "Table 4. Performance on the VQA benchmarks. " + }, + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "type": "text", + "content": " refers to the use of an " + }, + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "type": "inline_equation", + "content": "800^{2}" + }, + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "type": "text", + "content": " resolution, which differs from the " + }, + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "type": "inline_equation", + "content": "1600^{2}" + }, + { + "bbox": [ + 55, + 561, + 295, + 594 + ], + "type": "text", + "content": " resolution in the pre-trained model." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 597, + 288, + 671 + ], + "lines": [ + { + "bbox": [ + 61, + 597, + 288, + 671 + ], + "spans": [ + { + "bbox": [ + 61, + 597, + 288, + 671 + ], + "type": "table", + "html": "
ModelLLM SizeMMEMMBenchSEEDMMStar
SOLO0.5B523.2/222.513.845.526.2
SOLO3B1155.7/257/553.465.440.3
EVEv2*7B1128.0/240.760.354.244.9
Pxiel-SAIL0.5B564.1/150.731.852.226.3
Pixel-SAIL3B1187.3/242.956.366.140.1
Pixel-SAIL*7B1081.0/260.458.964.744.3
", + "image_path": "b98e83ba9dea39282a0c2ade0a8b2df0c60b7033fa236a79e7e6f9ddabb69a51.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 684, + 296, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 684, + 296, + 733 + ], + "spans": [ + { + "bbox": [ + 55, + 684, + 296, + 733 + ], + "type": "text", + "content": "Evaluation Setup. For visual prompt understanding and general image QA tasks, we adhere to the same setting as the base MLLM. In the case of segmentation-related tasks, if the model fails to predict a [SEG] token, we compel it" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 399, + 555, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 399, + 555, + 422 + ], + "spans": [ + { + "bbox": [ + 313, + 399, + 555, + 422 + ], + "type": "text", + "content": "to produce a [SEG] token to ensure the generation of the segmentation result." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 425, + 400, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 425, + 400, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 425, + 400, + 437 + ], + "type": "text", + "content": "4.1. Main Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 443, + 555, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 443, + 555, + 658 + ], + "spans": [ + { + "bbox": [ + 312, + 443, + 555, + 658 + ], + "type": "text", + "content": "Results on Referring Segmentation Benchmarks. We compare Pixel-SAIL with other pixel-grounded MLLMs and segmentation specialists on the RefCOCO+ [70], RefCOCOg [70], RefCOCO [25], and gRefCOCO [39] datasets. The comparison results are shown in Tab. 1. 
Pixel-SAIL 0.5B achieved 70.8, 75.4, and 77.9 cIoU on the validation splits of RefCOCO+, RefCOCOg, and RefCOCO, outperforming all segmentation specialists with comparable model sizes while also maintaining image conversation capabilities. Compared to the classical SAM-based MLLM competitor LISA-7B [27], Pixel-SAIL 0.5B surpassed it by 4.2, 7.9, and 7.8 cIoU on RefCOCO, RefCOCO+, and RefCOCOg respectively, despite having a much smaller model size (0.5B vs. 7B). On the more complex gRefCOCO dataset that includes multi-object segmentation, Pixel-SAIL 0.5B outperformed the carefully designed GSVA-7B [63] by 6.3, 4.8, and 6.5 cIoU on validation, testA, and testB splits respectively." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 659, + 556, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 659, + 556, + 733 + ], + "spans": [ + { + "bbox": [ + 313, + 659, + 556, + 733 + ], + "type": "text", + "content": "When scaling the model to 3B, Pixel-SAIL achieved 75.7, 78.7, 80.8, and 67.7 cIoU on RefCOCO+, RefCOCOg, RefCOCO, and gRefCOCO datasets respectively, surpassing all larger-sized (7B) MLLMs assisted with vision experts. Pixel-SAIL-3B even outperformed the SOTA Sa2VA-4B [72] (which uses the powerful InternVL2-" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 126, + 194, + 191 + ], + "blocks": [ + { + "bbox": [ + 55, + 90, + 197, + 123 + ], + "lines": [ + { + "bbox": [ + 55, + 90, + 197, + 123 + ], + "spans": [ + { + "bbox": [ + 55, + 90, + 197, + 123 + ], + "type": "text", + "content": "Table 5. 
Ablation study on the components of Pixel-SAIL. \"RC\" denotes region caption on RefCOCOg dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 126, + 194, + 191 + ], + "lines": [ + { + "bbox": [ + 59, + 126, + 194, + 191 + ], + "spans": [ + { + "bbox": [ + 59, + 126, + 194, + 191 + ], + "type": "table", + "html": "
ModelRefCOCO+/gRC
Plain Baseline64.5/57.3/60.11.0
+ Upsampling69.7/62.5/65.30.9
+ Training Data76.2/69.6/73.81.4
+ VP Injection77.4/70.4/75.216.1
+ Distillation77.9/70.8/75.416.0
", + "image_path": "cc0595ff87d12b9ef8be15175aa560c9ef16a01b5c5aff4e30984f3ad3cd312f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 203, + 140, + 319, + 190 + ], + "blocks": [ + { + "bbox": [ + 200, + 92, + 320, + 137 + ], + "lines": [ + { + "bbox": [ + 200, + 92, + 320, + 137 + ], + "spans": [ + { + "bbox": [ + 200, + 92, + 320, + 137 + ], + "type": "text", + "content": "Table 6. Ablation study on Base MLLM. The training data only includes LLaVA-665k and Ref-COCO+/g." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 203, + 140, + 319, + 190 + ], + "lines": [ + { + "bbox": [ + 203, + 140, + 319, + 190 + ], + "spans": [ + { + "bbox": [ + 203, + 140, + 319, + 190 + ], + "type": "table", + "html": "
MLLMSizeRefCOCO/+/g
SOLO0.5B69.7/62.5/65.3
SOLO3B73.2/66.4/69.1
EVEv27B74.9/68.7/71.3
", + "image_path": "455e7389ccd133ebb97f68487917667e4abac0f3041ae3af7a16c8d5e4cc749b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 324, + 138, + 439, + 184 + ], + "blocks": [ + { + "bbox": [ + 321, + 119, + 435, + 129 + ], + "lines": [ + { + "bbox": [ + 321, + 119, + 435, + 129 + ], + "spans": [ + { + "bbox": [ + 321, + 119, + 435, + 129 + ], + "type": "text", + "content": "caption on RefCOCOg dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 324, + 138, + 439, + 184 + ], + "lines": [ + { + "bbox": [ + 324, + 138, + 439, + 184 + ], + "spans": [ + { + "bbox": [ + 324, + 138, + 439, + 184 + ], + "type": "table", + "html": "
DataRefCOCO+/gRC
Basic Data69.7/62.5/65.3-
+ Seg Data76.2/69.6/73.8-
+ VP Data77.4/70.4/75.216.1
", + "image_path": "5fafcdafefd607096844ca33bce63f26dc79148a4c06e033e87fdba31edc6979.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 444, + 125, + 550, + 185 + ], + "blocks": [ + { + "bbox": [ + 321, + 95, + 553, + 118 + ], + "lines": [ + { + "bbox": [ + 321, + 95, + 553, + 118 + ], + "spans": [ + { + "bbox": [ + 321, + 95, + 553, + 118 + ], + "type": "text", + "content": "Table 7. Ablation on the train- Table 8. Ablation study on ing data. \"RC\" denotes region the distillation strategy." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 444, + 125, + 550, + 185 + ], + "lines": [ + { + "bbox": [ + 444, + 125, + 550, + 185 + ], + "spans": [ + { + "bbox": [ + 444, + 125, + 550, + 185 + ], + "type": "table", + "html": "
DataRefCOCO+/g
w/o Distill77.5/70.5/75.5
M2F77.7/71.0/75.8
SAM277.8/70.9/75.9
Both78.1/70.8/76.1
", + "image_path": "aa0c6a39fab68e9f25a60f16d1d7763c70a4ebb3afdb4147b705e13be16b2639.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 198, + 295, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 198, + 295, + 234 + ], + "spans": [ + { + "bbox": [ + 55, + 198, + 295, + 234 + ], + "type": "text", + "content": "4B [10] and SAM2-L [53]), achieving performance advantages of 1.4 and 2.0 cIoU on the more challenging RefCOCO+ and RefCOCOg datasets respectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 236, + 296, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 236, + 296, + 367 + ], + "spans": [ + { + "bbox": [ + 55, + 236, + 296, + 367 + ], + "type": "text", + "content": "Results on Visual Prompt Understanding Benchmarks. We evaluate the region caption performance on the RefCOCOg dataset, with results shown in Tab. 2. The training dataset of Pixel-SAIL does not include the RefCOCOg region caption dataset, so we directly evaluate its zero-shot performance. Pixel-SAIL-0.5B achieves a METEOR score of 16.0, surpassing OMG-LLaVA 7B by 0.7 points. When scaling the model to 3B, Pixel-SAIL achieves a METEOR score of 17.6, outperforming carefully designed larger models such as Osprey 7B and GLaMM 7B by 1.0 and 1.4 points respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 370, + 296, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 370, + 296, + 585 + ], + "spans": [ + { + "bbox": [ + 55, + 370, + 296, + 585 + ], + "type": "text", + "content": "Results on PerBench. We have benchmarked several popular pixel-grounded MLLMs on our proposed PerBench, with results shown in Tab. 3. LISA [27] scores 0 points across all tasks due to its inability to understand visual prompt inputs. 
Osprey [74] demonstrates strong object caption capabilities; however, it achieved only 13.4 METEOR in detailed caption tasks and " + }, + { + "bbox": [ + 55, + 370, + 296, + 585 + ], + "type": "inline_equation", + "content": "12.0\\%" + }, + { + "bbox": [ + 55, + 370, + 296, + 585 + ], + "type": "text", + "content": " accuracy in MCQ tasks due to limitations from short object caption lengths in its training data and impaired instruction-following ability. GLaMM [51] and Sa2VA [72] both exhibit comprehensive prompt understanding and segmentation capabilities, though GLaMM's weaker instruction-following ability resulted in only " + }, + { + "bbox": [ + 55, + 370, + 296, + 585 + ], + "type": "inline_equation", + "content": "14.0\\%" + }, + { + "bbox": [ + 55, + 370, + 296, + 585 + ], + "type": "text", + "content": " accuracy in MCQ tasks. PixelSAIL-0.5B achieves an overall score of 38.4, comparable to Sa2VA-4B despite Pixel-SAIL having a more powerful base MLLM and segmentation expert. Notably, Pixel-SAIL-3B achieves an overall score of 42.2, outperforming Sa2VA-4B across all three tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 588, + 296, + 732 + ], + "type": "text", + "content": "Results on VQA Benchmarks. We compare the visual question answering performance of Pixel-SAIL with the corresponding base MLLMs on the MME [20], MM-Bench [43], SEED [29], and MMStar [7] benchmarks, and the results are presented in Tab. 4. When the model size is 0.5B, Pixel-SAIL demonstrates performance improvements over the base MLLM across all four benchmarks, particularly on MMBench, where the score increased from 13.8 to 31.8. 
However, when the model size is 3B and 7B, Pixel-SAIL's performance is on par with that of the base MLLMs, which may be constrained by the current quantity (less than 2M) and quality of visual prompts and segmentation data." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 317, + 196, + 362, + 226 + ], + "blocks": [ + { + "bbox": [ + 317, + 196, + 362, + 226 + ], + "lines": [ + { + "bbox": [ + 317, + 196, + 362, + 226 + ], + "spans": [ + { + "bbox": [ + 317, + 196, + 362, + 226 + ], + "type": "image", + "image_path": "5e29fcd4523ca0d6da3e40f45f1513abc6492464fdbacd397a078097c046b16e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 365, + 196, + 394, + 226 + ], + "blocks": [ + { + "bbox": [ + 365, + 196, + 394, + 226 + ], + "lines": [ + { + "bbox": [ + 365, + 196, + 394, + 226 + ], + "spans": [ + { + "bbox": [ + 365, + 196, + 394, + 226 + ], + "type": "image", + "image_path": "bf21200e51b1a51097e0d3cc0511c985b0448f7206b378f04cc6aff1759b35af.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 395, + 197, + 462, + 226 + ], + "blocks": [ + { + "bbox": [ + 395, + 197, + 462, + 226 + ], + "lines": [ + { + "bbox": [ + 395, + 197, + 462, + 226 + ], + "spans": [ + { + "bbox": [ + 395, + 197, + 462, + 226 + ], + "type": "image", + "image_path": "b1cbbc683726f798824e46d98003ddc8d8e08482742e724218e33775a000ce62.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 463, + 196, + 512, + 227 + ], + "blocks": [ + { + "bbox": [ + 463, + 196, + 512, + 227 + ], + "lines": [ + { + "bbox": [ + 463, + 196, + 512, + 227 + ], + "spans": [ + { + "bbox": [ + 463, + 196, + 512, + 227 + ], + "type": "image", + "image_path": "ec4c320c8bb602c970cf6d5f5b650ee874030055777473a9e2cfe4a35408d8f6.jpg" + } + ] + } + ], + 
"index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 512, + 197, + 551, + 227 + ], + "blocks": [ + { + "bbox": [ + 512, + 197, + 551, + 227 + ], + "lines": [ + { + "bbox": [ + 512, + 197, + 551, + 227 + ], + "spans": [ + { + "bbox": [ + 512, + 197, + 551, + 227 + ], + "type": "image", + "image_path": "8514ef98ba120a98f43055533243a21f0110a475a9d3057fb2ce9a607d8d7ff4.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 317, + 232, + 411, + 264 + ], + "blocks": [ + { + "bbox": [ + 317, + 232, + 411, + 264 + ], + "lines": [ + { + "bbox": [ + 317, + 232, + 411, + 264 + ], + "spans": [ + { + "bbox": [ + 317, + 232, + 411, + 264 + ], + "type": "image", + "image_path": "4d78f012581df715eb27cab4d9168bf99ced4bfddc463564b571508b9fb66030.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 411, + 232, + 504, + 265 + ], + "blocks": [ + { + "bbox": [ + 411, + 232, + 504, + 265 + ], + "lines": [ + { + "bbox": [ + 411, + 232, + 504, + 265 + ], + "spans": [ + { + "bbox": [ + 411, + 232, + 504, + 265 + ], + "type": "image", + "image_path": "9b7217328723a584f890fb9982f3e03057486311027c5ff17f4e48f7003ad32e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 504, + 232, + 551, + 265 + ], + "blocks": [ + { + "bbox": [ + 504, + 232, + 551, + 265 + ], + "lines": [ + { + "bbox": [ + 504, + 232, + 551, + 265 + ], + "spans": [ + { + "bbox": [ + 504, + 232, + 551, + 265 + ], + "type": "image", + "image_path": "aa0956d34ee08556b170855ddc7b77d845501068b701513f9dd4cd5aa1f0b6cb.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 317, + 268, + 411, + 300 + ], + "blocks": [ + { + "bbox": [ + 317, + 
268, + 411, + 300 + ], + "lines": [ + { + "bbox": [ + 317, + 268, + 411, + 300 + ], + "spans": [ + { + "bbox": [ + 317, + 268, + 411, + 300 + ], + "type": "image", + "image_path": "a312e96e807c99c7975509f3cc30a8650cb5647a303a336f39244b88fb17a222.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 411, + 267, + 459, + 300 + ], + "blocks": [ + { + "bbox": [ + 411, + 267, + 459, + 300 + ], + "lines": [ + { + "bbox": [ + 411, + 267, + 459, + 300 + ], + "spans": [ + { + "bbox": [ + 411, + 267, + 459, + 300 + ], + "type": "image", + "image_path": "555602258ffdfb0cda0578a5201c1603ada5da1407bdc869aa0c325320e39050.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 459, + 267, + 551, + 300 + ], + "blocks": [ + { + "bbox": [ + 459, + 267, + 551, + 300 + ], + "lines": [ + { + "bbox": [ + 459, + 267, + 551, + 300 + ], + "spans": [ + { + "bbox": [ + 459, + 267, + 551, + 300 + ], + "type": "image", + "image_path": "fb4f0cc86bc8fa166e3d2d93309b4d2f49a2774966f93cbc596b8fc90285bfc2.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 317, + 300, + 398, + 342 + ], + "blocks": [ + { + "bbox": [ + 317, + 300, + 398, + 342 + ], + "lines": [ + { + "bbox": [ + 317, + 300, + 398, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 300, + 398, + 342 + ], + "type": "image", + "image_path": "f42004728503f31551f10e09916551225730c7ae2579b188ed67c1f60bb2c337.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 346, + 554, + 434 + ], + "lines": [ + { + "bbox": [ + 313, + 346, + 554, + 434 + ], + "spans": [ + { + "bbox": [ + 313, + 346, + 554, + 434 + ], + "type": "text", + "content": "Figure 4. Visualization results of Pixel-SAIL on diversity tasks. Best view it in color and zoom in. 
From top to bottom are visual prompt-based object caption, single/multi-object referring segmentation, vision-text referring segmentation, image caption and QA, and visual-prompt based conversation. Visual prompts in the form of points and boxes are converted into mask prompts using SAM [26]. For more visualization results and comparisons with other MLLMs, please refer to the appendix." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 399, + 300, + 459, + 342 + ], + "blocks": [ + { + "bbox": [ + 399, + 300, + 459, + 342 + ], + "lines": [ + { + "bbox": [ + 399, + 300, + 459, + 342 + ], + "spans": [ + { + "bbox": [ + 399, + 300, + 459, + 342 + ], + "type": "image", + "image_path": "1748944d684d603bdbb18ef6a58cc5f55b83c868283d1ba3cb803734338aa333.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 459, + 300, + 511, + 343 + ], + "blocks": [ + { + "bbox": [ + 459, + 300, + 511, + 343 + ], + "lines": [ + { + "bbox": [ + 459, + 300, + 511, + 343 + ], + "spans": [ + { + "bbox": [ + 459, + 300, + 511, + 343 + ], + "type": "image", + "image_path": "8c0aa32b9248a9481769b0b1ab82aaee0bd5d496ea09b8a99583223e7dc02225.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 511, + 300, + 548, + 342 + ], + "blocks": [ + { + "bbox": [ + 511, + 300, + 548, + 342 + ], + "lines": [ + { + "bbox": [ + 511, + 300, + 548, + 342 + ], + "spans": [ + { + "bbox": [ + 511, + 300, + 548, + 342 + ], + "type": "image", + "image_path": "8b9e4c68f6ea43f611dad57e5712dd70f8478ec89248fbfbeb3f59bd966df9c9.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 318, + 440, + 376, + 472 + ], + "blocks": [ + { + "bbox": [ + 318, + 440, + 376, + 472 + ], + "lines": [ + { + 
"bbox": [ + 318, + 440, + 376, + 472 + ], + "spans": [ + { + "bbox": [ + 318, + 440, + 376, + 472 + ], + "type": "image", + "image_path": "5a3cb155b0ff4d71d1da04b93abb3ec77b64982962df8f4621fb90b63ba49b49.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 318, + 478, + 376, + 518 + ], + "blocks": [ + { + "bbox": [ + 318, + 478, + 376, + 518 + ], + "lines": [ + { + "bbox": [ + 318, + 478, + 376, + 518 + ], + "spans": [ + { + "bbox": [ + 318, + 478, + 376, + 518 + ], + "type": "image", + "image_path": "c9e0341d3155c0159d45850ef3883c1f855c59deeb7dcf71f646e5a702932d8a.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 377, + 440, + 433, + 519 + ], + "blocks": [ + { + "bbox": [ + 377, + 440, + 433, + 519 + ], + "lines": [ + { + "bbox": [ + 377, + 440, + 433, + 519 + ], + "spans": [ + { + "bbox": [ + 377, + 440, + 433, + 519 + ], + "type": "image", + "image_path": "82720d858e972b556426d0d7a59e765573479959ded0e27c4c7c0c3bcaa4ae1e.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 434, + 440, + 492, + 479 + ], + "blocks": [ + { + "bbox": [ + 434, + 440, + 492, + 479 + ], + "lines": [ + { + "bbox": [ + 434, + 440, + 492, + 479 + ], + "spans": [ + { + "bbox": [ + 434, + 440, + 492, + 479 + ], + "type": "image", + "image_path": "576a8003516e688ddfec0f59b4c386ac5993c297f63a6a14bea084ff9ff5f7df.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 493, + 440, + 550, + 479 + ], + "blocks": [ + { + "bbox": [ + 493, + 440, + 550, + 479 + ], + "lines": [ + { + "bbox": [ + 493, + 440, + 550, + 479 + ], + "spans": [ + { + "bbox": [ + 493, + 440, + 550, + 479 + ], + "type": "image", + "image_path": 
"cec28f82f78a1ff66461359b6cf56928057d0bb8f10e5c03c2af24f3188d7bfc.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 317, + 520, + 376, + 558 + ], + "blocks": [ + { + "bbox": [ + 317, + 520, + 376, + 558 + ], + "lines": [ + { + "bbox": [ + 317, + 520, + 376, + 558 + ], + "spans": [ + { + "bbox": [ + 317, + 520, + 376, + 558 + ], + "type": "image", + "image_path": "ddd809653d8358f3fa9d942f2ed362b8eb1d49d2406959486c808b219fa5bd1b.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 563, + 555, + 595 + ], + "lines": [ + { + "bbox": [ + 313, + 563, + 555, + 595 + ], + "spans": [ + { + "bbox": [ + 313, + 563, + 555, + 595 + ], + "type": "text", + "content": "Figure 5. Image feature visualization results. From left to right are the image feature of the base MLLM, the image feature of Pixel-SAIL, and the mask feature of Pixel-SAIL." + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 377, + 520, + 433, + 559 + ], + "blocks": [ + { + "bbox": [ + 377, + 520, + 433, + 559 + ], + "lines": [ + { + "bbox": [ + 377, + 520, + 433, + 559 + ], + "spans": [ + { + "bbox": [ + 377, + 520, + 433, + 559 + ], + "type": "image", + "image_path": "956d0f979008f483a5008e483c6d996e4b1db14407264f0f0477c843be8c3d9a.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 433, + 520, + 492, + 559 + ], + "blocks": [ + { + "bbox": [ + 433, + 520, + 492, + 559 + ], + "lines": [ + { + "bbox": [ + 433, + 520, + 492, + 559 + ], + "spans": [ + { + "bbox": [ + 433, + 520, + 492, + 559 + ], + "type": "image", + "image_path": "80c579cddfe2ed559771ad16801a79afb29c49427d171ce49295ed1addfbe13c.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": 
"image", + "bbox": [ + 493, + 520, + 549, + 559 + ], + "blocks": [ + { + "bbox": [ + 493, + 520, + 549, + 559 + ], + "lines": [ + { + "bbox": [ + 493, + 520, + 549, + 559 + ], + "spans": [ + { + "bbox": [ + 493, + 520, + 549, + 559 + ], + "type": "image", + "image_path": "51a610628940f7450f2cce02aab7c97bd296dcf6d63c3709ae587ebcc9940c11.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "bbox": [ + 313, + 605, + 415, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 415, + 617 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 415, + 617 + ], + "type": "text", + "content": "4.2. Ablation Studies" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 624, + 555, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 624, + 555, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 624, + 555, + 732 + ], + "type": "text", + "content": "Effectiveness of Each Component. We conduct comprehensive ablation studies on the proposed components, with results presented in Tab. 5. Our plain baseline, trained with LLaVA-665k and RefCOCO+/g data, achieves only 64.5, 57.3, and 60.1 cIoU on the RefCOCO, RefCOCO+, and RefCOCOg datasets, respectively. Moreover, this baseline completely fails on the visual prompt understanding task, attaining merely 1.0 METEOR on the region caption task. 
Upon incorporating the learnable upsampling mod" + } + ] + } + ], + "index": 39 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 40 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 91, + 297, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 297, + 330 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 297, + 330 + ], + "type": "text", + "content": "ule, segmentation quality improves dramatically, with the model reaching 76.2, 69.6, and " + }, + { + "bbox": [ + 55, + 91, + 297, + 330 + ], + "type": "inline_equation", + "content": "73.8\\mathrm{cIoU}" + }, + { + "bbox": [ + 55, + 91, + 297, + 330 + ], + "type": "text", + "content": " on RefCOCO, RefCOCO+, and RefCOCOg. However, the model still cannot effectively interpret user-input visual prompts due to insufficient semantic information in the object representation. When we scale up the training data by introducing substantial amounts of segmentation data and visual-prompt understanding data, the model's segmentation capabilities are further enhanced. Despite scaling the training data, the model continues to struggle with visual prompt inputs because of the limited semantic information in the object representation. After implementing our proposed visual prompt injection mechanism, the model demonstrates significant improvements in visual prompt understanding, achieving 16.1 METEOR on the region caption task. Interestingly, we observe that enhanced visual prompt understanding capabilities positively influence referring segmentation performance. Finally, incorporating the distillation strategy further refines the model's detailed segmentation quality." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "spans": [ + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "text", + "content": "Ablation on Various MLLMs. To demonstrate the effectiveness of Pixel-SAIL, we validate across different architectures and sizes, with results shown in Tab. 6. To reduce training costs, we use only LLaVA-665k and RefCOCO " + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "inline_equation", + "content": "+ / \\mathrm{g}" + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "text", + "content": " data for training and evaluate on the referring segmentation task. When using our modified 0.5B SOLO as the base MLLM, Pixel-SAIL achieves cIoU scores of 69.7, 62.5, and 65.3 on RefCOCO " + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "inline_equation", + "content": "+ / \\mathrm{g}" + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "text", + "content": ". When scaling the model size to 3B, Pixel-SAIL's performance improves by 3.5, 3.9, and 3.8 cIoU on RefCOCO " + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "inline_equation", + "content": "+ / \\mathrm{g}" + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "text", + "content": ". When using EVEv2-7B as the base MLLM, despite the attention between vision tokens changing from full attention to causal attention and the architecture transitioning to an MOE architecture, Pixel-SAIL achieves cIoU scores of 77.4, 70.4, and 75.2 on RefCOCO " + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "inline_equation", + "content": "+ / \\mathrm{g}" + }, + { + "bbox": [ + 55, + 332, + 296, + 524 + ], + "type": "text", + "content": ", demonstrating that performance consistently increases with model scaling." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 526, + 296, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 526, + 296, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 526, + 296, + 694 + ], + "type": "text", + "content": "Ablation on Data Scaling. Data plays a crucial role in the performance of Pixel-SAIL. As shown in Tab. 7, we conduct comprehensive ablation studies on the training data to evaluate its impact. When trained solely with basic data (including LLaVA-665k and RefCOCO+/g datasets), Pixel-SAIL achieves 69.7, 62.5, and 65.3 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively. Upon scaling the segmentation-related data, Pixel-SAIL demonstrates significant performance improvements of 6.5, 7.1, and 8.5 cIoU on these datasets. Furthermore, incorporating visual prompt data for mixed training not only enhances the model's visual prompt understanding capabilities but also yields additional performance gains of 1.2, 0.8, and 1.4 cIoU on RefCOCO, RefCOCO+, and RefCOCOg, respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "spans": [ + { + "bbox": [ + 55, + 696, + 296, + 733 + ], + "type": "text", + "content": "Ablation on Distillation Strategy. Distillation is a highly effective method for infusing knowledge into Pixel-SAIL. We conduct ablation studies on the distillation strategy, and" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "text", + "content": "the results are presented in Tab. 8. We use the average cIoU across all splits as the evaluation metric. 
When only Mask2Former [12] is employed to distill high-resolution mask features, Pixel-SAIL achieves performance gains of 0.2, 0.5, and 0.3 on RefCOCO " + }, + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "inline_equation", + "content": "+ / \\mathrm{g}" + }, + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "text", + "content": ". When SAM2 [53] is used to distill low-resolution image features, Pixel-SAIL obtains performance improvements of 0.3, 0.4, and 0.4 on RefCOCO " + }, + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "inline_equation", + "content": "+ / \\mathrm{g}" + }, + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "text", + "content": ". When both teacher models are utilized collaboratively, performance gains of 0.6, 0.3, and 0.5 are achieved. Additionally, the extra computational cost introduced by the distillation strategy is minimal, increasing the training time by only about " + }, + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 313, + 91, + 555, + 236 + ], + "type": "text", + "content": " for Pixel-SAIL-0.5B." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 242, + 441, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 242, + 441, + 254 + ], + "spans": [ + { + "bbox": [ + 313, + 242, + 441, + 254 + ], + "type": "text", + "content": "4.3. Visualization Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 258, + 555, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 258, + 555, + 307 + ], + "spans": [ + { + "bbox": [ + 313, + 258, + 555, + 307 + ], + "type": "text", + "content": "Visual Comparison. In Fig. 4, we showcase Pixel-SAIL's visualization results on diverse tasks. Pixel-SAIL flexibly interprets both visual prompts and text instruction inputs, responding with text and segmentation masks." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 307, + 556, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 307, + 556, + 486 + ], + "spans": [ + { + "bbox": [ + 313, + 307, + 556, + 486 + ], + "type": "text", + "content": "Visual Affinity Map Analysis. We use PCA dimensionality reduction algorithm to visualize vision features, with results shown in Fig. 5. Our Pixel-SAIL's image features (3rd column) are denser and more diverse compared to the base MLLM's image features (2nd column). Pixel-SAIL's mask features, after the upsampling module, are denser and have better segmentation edges. Interestingly, Pixel-SAIL's image features (more focused on understanding, combining factors such as categories, colors, positions, etc.) exhibit different characteristics from mask features (more focused on perception, categories, and instances). As seen in the second row's third and fourth columns, the cars on the left and right have relatively distant feature representations in the image features, while they are very close in the mask features." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 496, + 388, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 496, + 388, + 509 + ], + "spans": [ + { + "bbox": [ + 313, + 496, + 388, + 509 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 517, + 556, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 517, + 556, + 660 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 556, + 660 + ], + "type": "text", + "content": "We explore the simplest architecture for pixel-grounded understanding tasks. In particular, we present Pixel-SAIL, which extends current SAIL-like MLLM for fine-grained understanding with three technical improvements (learnable upsampling module, new visual prompt encoding, and segmentor feature distillation). 
For the first time, our work proves that even without extra visual experts (visual encoder, segmentation models), one single transformer can still achieve stronger performance on four public referring segmentation benchmarks. We further introduce a more challenging benchmark, Perbench, to promote the development of pixel-MLLM community." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 660, + 556, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 660, + 556, + 732 + ], + "spans": [ + { + "bbox": [ + 313, + 660, + 556, + 732 + ], + "type": "text", + "content": "Limitation and Future Work. Our work provides the simplest solution for pixel-grounded tasks. However, one limitation is that we only adopt 1.7M data for co-training. We will further explore Pixel-SAIL on more data (for example, billion-level masks along with visual prompts [26]) for cotraining." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 90, + 115, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 90, + 115, + 102 + ], + "spans": [ + { + "bbox": [ + 56, + 90, + 115, + 102 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 110, + 294, + 731 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 110, + 294, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 110, + 294, + 164 + ], + "spans": [ + { + "bbox": [ + 61, + 110, + 294, + 164 + ], + "type": "text", + "content": "[1] Jinze Bai, Shuai Bai, Shusheng Yang, 
Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 166, + 294, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 166, + 294, + 209 + ], + "spans": [ + { + "bbox": [ + 61, + 166, + 294, + 209 + ], + "type": "text", + "content": "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 211, + 294, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 211, + 294, + 243 + ], + "spans": [ + { + "bbox": [ + 62, + 211, + 294, + 243 + ], + "type": "text", + "content": "[3] Satanjeev Banerjee and Alon Lavie. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. ACL, 2005." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 246, + 294, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 246, + 294, + 289 + ], + "spans": [ + { + "bbox": [ + 62, + 246, + 294, + 289 + ], + "type": "text", + "content": "[4] Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, and Yong Jae Lee. Making large multimodal models understand arbitrary visual prompts. In CVPR, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 291, + 294, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 291, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 62, + 291, + 294, + 334 + ], + "type": "text", + "content": "[5] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internl m2 technical report. 
arXiv preprint arXiv:2403.17297, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 336, + 294, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 336, + 294, + 380 + ], + "spans": [ + { + "bbox": [ + 62, + 336, + 294, + 380 + ], + "type": "text", + "content": "[6] Lin Chen, Jisong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. arXiv preprint arXiv:2311.12793, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 381, + 294, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 381, + 294, + 435 + ], + "spans": [ + { + "bbox": [ + 62, + 381, + 294, + 435 + ], + "type": "text", + "content": "[7] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 437, + 294, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 437, + 294, + 470 + ], + "spans": [ + { + "bbox": [ + 62, + 437, + 294, + 470 + ], + "type": "text", + "content": "[8] Yangyi Chen, Xingyao Wang, Hao Peng, and Heng Ji. A single transformer for scalable vision-language modeling. TMLR, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 472, + 294, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 472, + 294, + 515 + ], + "spans": [ + { + "bbox": [ + 62, + 472, + 294, + 515 + ], + "type": "text", + "content": "[9] Yi-Chia Chen, Wei-Hua Li, Cheng Sun, Yu-Chiang Frank Wang, and Chu-Song Chen. Sam4mllm: Enhance multimodal large language model for referring expression segmentation. ECCV, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 517, + 294, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 517, + 294, + 572 + ], + "spans": [ + { + "bbox": [ + 57, + 517, + 294, + 572 + ], + "type": "text", + "content": "[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 574, + 294, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 574, + 294, + 628 + ], + "spans": [ + { + "bbox": [ + 57, + 574, + 294, + 628 + ], + "type": "text", + "content": "[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In CVPR, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 630, + 294, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 630, + 294, + 672 + ], + "spans": [ + { + "bbox": [ + 57, + 630, + 294, + 672 + ], + "type": "text", + "content": "[12] Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In CVPR, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 675, + 294, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 675, + 294, + 708 + ], + "spans": [ + { + "bbox": [ + 57, + 675, + 294, + 708 + ], + "type": "text", + "content": "[13] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 710, + 294, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 710, + 294, + 731 + ], + "spans": [ + { + "bbox": [ + 57, + 710, + 294, + 731 + ], + "type": "text", + "content": "[14] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 91, + 553, + 731 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 335, + 91, + 553, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 91, + 553, + 112 + ], + "spans": [ + { + "bbox": [ + 335, + 91, + 553, + 112 + ], + "type": "text", + "content": "vision-language models. arXiv preprint arXiv:2406.11832, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 115, + 553, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 115, + 553, + 147 + ], + "spans": [ + { + "bbox": [ + 316, + 115, + 553, + 147 + ], + "type": "text", + "content": "[15] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. NeurIPS, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 148, + 553, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 148, + 553, + 192 + ], + "spans": [ + { + "bbox": [ + 317, + 148, + 553, + 192 + ], + "type": "text", + "content": "[16] Haiwen Diao, Xiaotong Li, Yufeng Cui, Yueze Wang, Haoge Deng, Ting Pan, Wenxuan Wang, Huchuan Lu, and Xinlong Wang. Eve2: Improved baselines for encoder-free vision-language models. arXiv preprint arXiv:2502.06788, 2025." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 194, + 553, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 194, + 553, + 225 + ], + "spans": [ + { + "bbox": [ + 317, + 194, + 553, + 225 + ], + "type": "text", + "content": "[17] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 228, + 553, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 228, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 317, + 228, + 553, + 270 + ], + "type": "text", + "content": "[18] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In ACMMM, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 272, + 553, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 272, + 553, + 304 + ], + "spans": [ + { + "bbox": [ + 317, + 272, + 553, + 304 + ], + "type": "text", + "content": "[19] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. In CVPR, 2021." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 306, + 553, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 306, + 553, + 360 + ], + "spans": [ + { + "bbox": [ + 317, + 306, + 553, + 360 + ], + "type": "text", + "content": "[20] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, Yunsheng Wu, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 362, + 553, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 362, + 553, + 394 + ], + "spans": [ + { + "bbox": [ + 316, + 362, + 553, + 394 + ], + "type": "text", + "content": "[21] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 396, + 553, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 396, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 317, + 396, + 553, + 449 + ], + "type": "text", + "content": "[22] Louis Martin Hugo Touvron, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, and et al. Llama 2: Open foundation and fine-tuned chat models. arXiv:2307.09288, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 452, + 553, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 452, + 553, + 494 + ], + "spans": [ + { + "bbox": [ + 317, + 452, + 553, + 494 + ], + "type": "text", + "content": "[23] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In ECCV, 2020." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 496, + 553, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 496, + 553, + 540 + ], + "spans": [ + { + "bbox": [ + 317, + 496, + 553, + 540 + ], + "type": "text", + "content": "[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 541, + 553, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 541, + 553, + 574 + ], + "spans": [ + { + "bbox": [ + 317, + 541, + 553, + 574 + ], + "type": "text", + "content": "[25] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 575, + 553, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 575, + 553, + 618 + ], + "spans": [ + { + "bbox": [ + 317, + 575, + 553, + 618 + ], + "type": "text", + "content": "[26] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. ICCV, 2023." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 620, + 553, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 620, + 553, + 652 + ], + "spans": [ + { + "bbox": [ + 317, + 620, + 553, + 652 + ], + "type": "text", + "content": "[27] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In CVPR, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 654, + 553, + 697 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 654, + 553, + 697 + ], + "spans": [ + { + "bbox": [ + 317, + 654, + 553, + 697 + ], + "type": "text", + "content": "[28] Weixian Lei, Jiacong Wang, Haochen Wang, Xiangtai Li, Jun Hao Liew, Jiashi Feng, and Zilong Huang. The scalability of simplicity: Empirical analysis of vision-language learning with a single transformer. arXiv, 2025." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 699, + 553, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 699, + 553, + 731 + ], + "spans": [ + { + "bbox": [ + 317, + 699, + 553, + 731 + ], + "type": "text", + "content": "[29] Bohao Li, Yuying Ge, Yixiao Ge, Guangzhi Wang, Rui Wang, Ruimao Zhang, and Ying Shan. Seed-bench: Benchmarking multimodal large language models. In CVPR, 2024." + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 759 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 731 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 91, + 296, + 135 + ], + "spans": [ + { + "bbox": [ + 56, + 91, + 296, + 135 + ], + "type": "text", + "content": "[30] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 137, + 294, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 137, + 294, + 179 + ], + "spans": [ + { + "bbox": [ + 56, + 137, + 294, + 179 + ], + "type": "text", + "content": "[31] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, 2022." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 181, + 294, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 181, + 294, + 224 + ], + "spans": [ + { + "bbox": [ + 56, + 181, + 294, + 224 + ], + "type": "text", + "content": "[32] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 226, + 294, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 226, + 294, + 270 + ], + "spans": [ + { + "bbox": [ + 56, + 226, + 294, + 270 + ], + "type": "text", + "content": "[33] Xiangtai Li, Haobo Yuan, Wei Li, Henghui Ding, Size Wu, Wenwei Zhang, Yining Li, Kai Chen, and Chen Change Loy. Omg-seg: Is one model good enough for all segmentation? In CVPR, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 271, + 294, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 271, + 294, + 315 + ], + "spans": [ + { + "bbox": [ + 56, + 271, + 294, + 315 + ], + "type": "text", + "content": "[34] Xiaotong Li, Fan Zhang, Haiwen Diao, Yueze Wang, Xinlong Wang, and Ling-Yu Duan. Densefusion-1m: Merging vision experts for comprehensive multimodal perception. arXiv preprint arXiv:2407.08303, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 316, + 294, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 316, + 294, + 348 + ], + "spans": [ + { + "bbox": [ + 56, + 316, + 294, + 348 + ], + "type": "text", + "content": "[35] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. ECCV, 2022." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 350, + 294, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 350, + 294, + 393 + ], + "spans": [ + { + "bbox": [ + 56, + 350, + 294, + 393 + ], + "type": "text", + "content": "[36] Chen Liang, Wenguan Wang, Tianfei Zhou, Jiaxu Miao, Yawei Luo, and Yi Yang. Local-global context aware transformer for language-guided video segmentation. arXiv preprint arXiv:2203.09773, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 395, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 395, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 56, + 395, + 294, + 437 + ], + "type": "text", + "content": "[37] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 439, + 294, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 439, + 294, + 494 + ], + "spans": [ + { + "bbox": [ + 56, + 439, + 294, + 494 + ], + "type": "text", + "content": "[38] Weifeng Lin, Xinyu Wei, Ruichuan An, Peng Gao, Bocheng Zou, Yulin Luo, Siyuan Huang, Shanghang Zhang, and Hongsheng Li. Draw-and-understand: Leveraging visual prompts to enable mllms to comprehend what you want. arXiv preprint arXiv:2403.20271, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 495, + 294, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 495, + 294, + 517 + ], + "spans": [ + { + "bbox": [ + 56, + 495, + 294, + 517 + ], + "type": "text", + "content": "[39] Chang Liu, Henghui Ding, and Xudong Jiang. GRES: Generalized referring expression segmentation. In CVPR, 2023." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 518, + 294, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 518, + 294, + 540 + ], + "spans": [ + { + "bbox": [ + 56, + 518, + 294, + 540 + ], + "type": "text", + "content": "[40] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 541, + 294, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 541, + 294, + 573 + ], + "spans": [ + { + "bbox": [ + 56, + 541, + 294, + 573 + ], + "type": "text", + "content": "[41] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 574, + 294, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 574, + 294, + 618 + ], + "spans": [ + { + "bbox": [ + 56, + 574, + 294, + 618 + ], + "type": "text", + "content": "[42] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. *Polyformer: Referring image segmentation as sequential polygon generation*. *CVPR*, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 620, + 294, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 620, + 294, + 663 + ], + "spans": [ + { + "bbox": [ + 56, + 620, + 294, + 663 + ], + "type": "text", + "content": "[43] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In ECCV, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 664, + 294, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 664, + 294, + 686 + ], + "spans": [ + { + "bbox": [ + 56, + 664, + 294, + 686 + ], + "type": "text", + "content": "[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint, 2017." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 687, + 294, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 687, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 56, + 687, + 294, + 709 + ], + "type": "text", + "content": "[45] Timo Lüddecke and Alexander Ecker. Image segmentation using text and image prompts. In CVPR, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 710, + 294, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 710, + 294, + 731 + ], + "spans": [ + { + "bbox": [ + 56, + 710, + 294, + 731 + ], + "type": "text", + "content": "[46] Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jifeng Dai, Yu Qiao, and Xizhou Zhu. Mono-internvl: Pushing the" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 91, + 553, + 731 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 333, + 91, + 553, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 91, + 553, + 114 + ], + "spans": [ + { + "bbox": [ + 333, + 91, + 553, + 114 + ], + "type": "text", + "content": "boundaries of monolithic multimodal large language models with endogenous visual pre-training. CVPR, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 115, + 553, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 115, + 553, + 148 + ], + "spans": [ + { + "bbox": [ + 316, + 115, + 553, + 148 + ], + "type": "text", + "content": "[47] Chuofan Ma, Yi Jiang, Jiannan Wu, Zehuan Yuan, and Xiaojuan Qi. 
Groma: Localized visual tokenization for grounding multimodal large language models. In ECCV, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 149, + 553, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 149, + 553, + 203 + ], + "spans": [ + { + "bbox": [ + 316, + 149, + 553, + 203 + ], + "type": "text", + "content": "[48] Shehan Munasinghe, Hanan Gani, Wenqi Zhu, Jiale Cao, Eric Xing, Fahad Shahbaz Khan, and Salman Khan. Videoglamm: A large multimodal model for pixel-level visual grounding in videos. arXiv preprint arXiv:2411.04923, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 205, + 553, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 205, + 553, + 248 + ], + "spans": [ + { + "bbox": [ + 316, + 205, + 553, + 248 + ], + "type": "text", + "content": "[49] Lu Qi, Yi-Wen Chen, Lehan Yang, Tiancheng Shen, Xiangtai Li, Weidong Guo, Yu Xu, and Ming-Hsuan Yang. Generalizable entity grounding via assistance of large language model. arXiv preprint arXiv:2402.02555, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 250, + 553, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 250, + 553, + 304 + ], + "spans": [ + { + "bbox": [ + 316, + 250, + 553, + 304 + ], + "type": "text", + "content": "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 305, + 553, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 305, + 553, + 360 + ], + "spans": [ + { + "bbox": [ + 316, + 305, + 553, + 360 + ], + "type": "text", + "content": "[51] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M. 
Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S. Khan. Glamm: Pixel grounding large multimodal model. In CVPR, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 361, + 553, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 361, + 553, + 405 + ], + "spans": [ + { + "bbox": [ + 316, + 361, + 553, + 405 + ], + "type": "text", + "content": "[52] Jeff Rasley, Samyam Rajbhandari, Olatunj Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In SIGKDD, 2020." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 407, + 553, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 407, + 553, + 460 + ], + "spans": [ + { + "bbox": [ + 316, + 407, + 553, + 460 + ], + "type": "text", + "content": "[53] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 462, + 553, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 462, + 553, + 496 + ], + "spans": [ + { + "bbox": [ + 316, + 462, + 553, + 496 + ], + "type": "text", + "content": "[54] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In CVPR, 2024." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 498, + 553, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 498, + 553, + 562 + ], + "spans": [ + { + "bbox": [ + 316, + 498, + 553, + 562 + ], + "type": "text", + "content": "[55] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 563, + 553, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 563, + 553, + 629 + ], + "spans": [ + { + "bbox": [ + 316, + 563, + 553, + 629 + ], + "type": "text", + "content": "[56] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and efficient foundation language models. arXiv:2302.13971, 2023." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 631, + 553, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 631, + 553, + 662 + ], + "spans": [ + { + "bbox": [ + 316, + 631, + 553, + 662 + ], + "type": "text", + "content": "[57] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. CVPR, 2015." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 664, + 553, + 697 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 664, + 553, + 697 + ], + "spans": [ + { + "bbox": [ + 316, + 664, + 553, + 697 + ], + "type": "text", + "content": "[58] Jiaqi Wang, Pan Zhang, Tao Chu, Yuhang Cao, Yujie Zhou, Tong Wu, Bin Wang, Conghui He, and Dahua Lin. V3det: Vast vocabulary visual detection dataset. 
In ICCV, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 699, + 553, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 699, + 553, + 731 + ], + "spans": [ + { + "bbox": [ + 316, + 699, + 553, + 731 + ], + "type": "text", + "content": "[59] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022." + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 91, + 296, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 91, + 296, + 125 + ], + "spans": [ + { + "bbox": [ + 56, + 91, + 296, + 125 + ], + "type": "text", + "content": "[60] Cong Wei, Haoxian Tan, Yujie Zhong, Yujiu Yang, and Lin Ma. LaSagnA: Language-based segmentation assistant for complex queries. arXiv preprint arXiv:2404.08506, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 126, + 296, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 126, + 296, + 170 + ], + "spans": [ + { + "bbox": [ + 56, + 126, + 296, + 170 + ], + "type": "text", + "content": "[61] Jianzong Wu, Xiangtai Li, Shilin Xu, Haobo Yuan, Henghui Ding, Yibo Yang, Xia Li, Jiangning Zhang, Yunhai Tong, Xudong Jiang, Bernard Ghanem, and Dacheng Tao. Towards open vocabulary learning: A survey. arXiv pre-print, 2023." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 171, + 294, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 171, + 294, + 203 + ], + "spans": [ + { + "bbox": [ + 56, + 171, + 294, + 203 + ], + "type": "text", + "content": "[62] Size Wu, Sheng Jin, Wenwei Zhang, Lumin Xu, Wentao Liu, Wei Li, and Chen Change Loy. F-lmm: Grounding frozen large multimodal models. CVPR, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 205, + 294, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 205, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 294, + 239 + ], + "type": "text", + "content": "[63] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In CVPR, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 240, + 294, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 240, + 294, + 282 + ], + "spans": [ + { + "bbox": [ + 56, + 240, + 294, + 282 + ], + "type": "text", + "content": "[64] Hu Xu, Saining Xie, Xiaoqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. arXiv preprint arXiv:2309.16671, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 285, + 294, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 285, + 294, + 449 + ], + "spans": [ + { + "bbox": [ + 56, + 285, + 294, + 449 + ], + "type": "text", + "content": "[65] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 450, + 294, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 450, + 294, + 494 + ], + "spans": [ + { + "bbox": [ + 56, + 450, + 294, + 494 + ], + "type": "text", + "content": "[66] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 495, + 294, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 495, + 294, + 538 + ], + "spans": [ + { + "bbox": [ + 56, + 495, + 294, + 538 + ], + "type": "text", + "content": "[67] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 540, + 294, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 540, + 294, + 583 + ], + "spans": [ + { + "bbox": [ + 56, + 540, + 294, + 583 + ], + "type": "text", + "content": "[68] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 586, + 294, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 586, + 294, + 628 + ], + "spans": [ + { + "bbox": [ + 56, + 586, + 294, + 628 + ], + "type": "text", + "content": "[69] Zuyao You, Junke Wang, Lingyu Kong, Bo He, and Zuxuan Wu. Pix2cap-coco: Advancing visual comprehension via pixel-level captioning. arXiv preprint arXiv:2501.13893, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 632, + 294, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 632, + 294, + 663 + ], + "spans": [ + { + "bbox": [ + 56, + 632, + 294, + 663 + ], + "type": "text", + "content": "[70] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In ECCV, 2016." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 666, + 294, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 666, + 294, + 708 + ], + "spans": [ + { + "bbox": [ + 56, + 666, + 294, + 708 + ], + "type": "text", + "content": "[71] Haobo Yuan, Xiangtai Li, Chong Zhou, Yining Li, Kai Chen, and Chen Change Loy. Open-vocabulary sam: Segment and recognize twenty-thousand classes interactively. arXiv preprint, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 710, + 294, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 710, + 294, + 732 + ], + "spans": [ + { + "bbox": [ + 56, + 710, + 294, + 732 + ], + "type": "text", + "content": "[72] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 91, + 553, + 583 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 333, + 91, + 553, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 91, + 553, + 125 + ], + "spans": [ + { + "bbox": [ + 333, + 91, + 553, + 125 + ], + "type": "text", + "content": "Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 126, + 553, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 126, + 553, + 169 + ], + "spans": [ + { + "bbox": [ + 316, + 126, + 553, + 169 + ], + "type": "text", + "content": "[73] Haobo Yuan, Tao Zhang, Xiangtai Li, Lu Qi, Zilong Huang, Shilin Xu, Jiashi Feng, and Ming-Hsuan Yang. 4th pvuw mevis 3rd place report: Sa2va. arXiv preprint arXiv:2504.00476, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 171, + 553, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 171, + 553, + 203 + ], + "spans": [ + { + "bbox": [ + 316, + 171, + 553, + 203 + ], + "type": "text", + "content": "[74] Yuqian Yuan, Wentong Li, Jian Liu, Dongqi Tang, Xinjie Luo, Chi Qin, Lei Zhang, and Jianke Zhu. Osprey: Pixel understanding with visual instruction tuning. In CVPR, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 205, + 553, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 205, + 553, + 236 + ], + "spans": [ + { + "bbox": [ + 316, + 205, + 553, + 236 + ], + "type": "text", + "content": "[75] Alireza Zareian, Kevin Dela Rosa, Derek Hao Hu, and Shih-Fu Chang. Open-vocabulary object detection using captions. In CVPR, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 238, + 553, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 238, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 316, + 238, + 553, + 270 + ], + "type": "text", + "content": "[76] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 272, + 553, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 272, + 553, + 360 + ], + "spans": [ + { + "bbox": [ + 316, + 272, + 553, + 360 + ], + "type": "text", + "content": "[77] Pan Zhang, Xiaoyi Dong, Bin Wang, Yuhang Cao, Chao Xu, Linke Ouyang, Zhiyuan Zhao, Shuangrui Ding, Songyang Zhang, Haodong Duan, Wenwei Zhang, Hang Yan, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer: A vision-language large model for advanced text-image comprehension and composition. arXiv preprint arXiv:2309.15112, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 361, + 553, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 361, + 553, + 393 + ], + "spans": [ + { + "bbox": [ + 316, + 361, + 553, + 393 + ], + "type": "text", + "content": "[78] Tao Zhang, Xingye Tian, Yu Wu, Shunping Ji, Xuebo Wang, Yuan Zhang, and Pengfei Wan. DVIS: Decoupled video instance segmentation framework. In ICCV, 2023." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 395, + 553, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 395, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 316, + 395, + 553, + 449 + ], + "type": "text", + "content": "[79] Tao Zhang, Xingye Tian, Yikang Zhou, Shunping Ji, Xuebo Wang, Xin Tao, Yuan Zhang, Pengfei Wan, Zhongyuan Wang, and Yu Wu. Dvis++: Improved decoupled framework for universal video segmentation. arXiv preprint arXiv:2312.13305, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 450, + 553, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 450, + 553, + 494 + ], + "spans": [ + { + "bbox": [ + 316, + 450, + 553, + 494 + ], + "type": "text", + "content": "[80] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Change Loy Chen, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. In NeurIPS, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 495, + 553, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 495, + 553, + 528 + ], + "spans": [ + { + "bbox": [ + 316, + 495, + 553, + 528 + ], + "type": "text", + "content": "[81] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In CVPR, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 529, + 553, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 529, + 553, + 583 + ], + "spans": [ + { + "bbox": [ + 316, + 529, + 553, + 583 + ], + "type": "text", + "content": "[82] Yikang Zhou, Tao Zhang, Shilin Xu, Shihao Chen, Qianyu Zhou, Yunhai Tong, Shunping Ji, Jiangning Zhang, Xiangtai Li, and Lu Qi. Are they the same? exploring visual correspondence shortcomings of multimodal llms. arXiv preprint arXiv:2501.04670, 2025." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 94, + 86, + 516, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 86, + 516, + 104 + ], + "spans": [ + { + "bbox": [ + 94, + 86, + 516, + 104 + ], + "type": "text", + "content": "Pixel-SAIL: Single Transformer For Pixel-Grounded Understanding" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 234, + 113, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 113, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 234, + 113, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 143, + 295, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 143, + 295, + 214 + ], + "spans": [ + { + "bbox": [ + 55, + 143, + 295, + 214 + ], + "type": "text", + "content": "We first present more details on training and testing of our Pixel-SAIL in Sec. 6. Then, we present the detailed benchmark building process, in Sec. 7 and more challenging examples in PerBench in Sec. 8. Next, we present more comparison with current state-of-the-art pixel-grounded MLLMs, in Sec. 9." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 228, + 253, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 228, + 253, + 243 + ], + "spans": [ + { + "bbox": [ + 55, + 228, + 253, + 243 + ], + "type": "text", + "content": "6. 
More Detailed Training and Testing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 250, + 295, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 250, + 295, + 512 + ], + "spans": [ + { + "bbox": [ + 56, + 250, + 295, + 512 + ], + "type": "text", + "content": "Training. We will present more details about the training, including dataset sampling specifications and distillation methodology. For the RefCOCO series [25, 70] datasets, we randomly sample 5 referring expressions per image and organize them into a multi-round dialogue format as a single training data point, processing all images for four epochs. For COCO [37] data, we sample 5 categories per image and randomly select either instance mode or semantic mode to structure the responses. In instance mode, objects are arranged by their center points from left to right. We process the COCO dataset for one epoch. For Pixel2Cap [69], our generated detailed object caption data, and Osprey [74] object description data, we randomly sample 1-5 visual prompts per image and randomly incorporate questions about non-existent visual prompts, with responses indicating that these visual prompts do not exist. These object caption datasets are processed for five epochs. For other segmentation-related or visual prompt-related data, we conduct one epoch. For LLaVA-665k, we randomly sample at a 1:1 ratio alongside other data for joint training to ensure that the base MLLM's instruction-following capability remains intact." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 514, + 295, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 514, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 514, + 295, + 574 + ], + "type": "text", + "content": "When the length of input tokens (including the length of vision tokens) exceeds 8192, we truncate the excess portion. 
For the 0.5B model, we use DeepSpeed Zero-1 [52] for training, and for the 3B and 7B models, we use DeepSpeed Zero-2 [52] for training." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 575, + 295, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 575, + 295, + 682 + ], + "spans": [ + { + "bbox": [ + 55, + 575, + 295, + 682 + ], + "type": "text", + "content": "We distill the mask features generated by the Mask2Former [12] pixel decoder and the lowest resolution features generated by the SAM2 [53] image encoder onto the upsampled mask features from Pixel-SAIL and the image features directly reshaped from vision tokens, respectively. We use bilinear interpolation to align spatial dimensions and implement a learnable linear layer to align the channel size. The distillation process employs MSE loss with a weight of 0.5." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 684, + 295, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 684, + 295, + 731 + ], + "spans": [ + { + "bbox": [ + 55, + 684, + 295, + 731 + ], + "type": "text", + "content": "Testing. We have elaborated on the testing details of Pixel-SAIL on pixel-grounded benchmarks in the main text. For general image question answering benchmarks, we follow the prompt settings of the base MLLMs and use" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 143, + 553, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 143, + 553, + 167 + ], + "spans": [ + { + "bbox": [ + 313, + 143, + 553, + 167 + ], + "type": "text", + "content": "VLMEvalKit [18] for evaluation, without using additional LLM assistance to identify answers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 177, + 553, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 177, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 313, + 177, + 553, + 205 + ], + "type": "text", + "content": "7. 
More Detailed Process on Benchmarking Building" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 211, + 553, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 211, + 553, + 258 + ], + "spans": [ + { + "bbox": [ + 313, + 211, + 553, + 258 + ], + "type": "text", + "content": "The construction of PerBench combines an automated model-generated pipeline with manual screening, correction, and annotation. The process is divided into three stages." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 259, + 553, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 259, + 553, + 378 + ], + "spans": [ + { + "bbox": [ + 313, + 259, + 553, + 378 + ], + "type": "text", + "content": "The first stage involves annotating detailed object captions. We crop objects and draw visual prompts on the original images to prompt InternVL-2.5 78B [10] and Qwen-VL 2.5 72B [2] to generate detailed captions for the objects. These captions are then cross-validated using Qwen2.5 72B [66]. If all captions are consistent, they are integrated using an LLM; otherwise, the data are discarded. After the model automatically generates the detailed object captions, we manually select and correct 500 of them to form the final 500 detailed object caption data points in the benchmark." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 380, + 553, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 380, + 553, + 462 + ], + "spans": [ + { + "bbox": [ + 313, + 380, + 553, + 462 + ], + "type": "text", + "content": "The second stage focuses on annotating visual-prompt question-answering data in an MCQ (Multiple Choice Question) format. In this phase, we manually generate a multiple-choice question for each object caption obtained from the first stage. After completing the annotations, two quality control specialists perform cross-verification to identify and rectify any potential errors." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 463, + 553, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 463, + 553, + 568 + ], + "spans": [ + { + "bbox": [ + 313, + 463, + 553, + 568 + ], + "type": "text", + "content": "The final stage contains the annotation of visual-text referring segmentation data. At this stage, we manually select and annotate object segmentation masks, referring visual prompts, and text from SAM images. During the annotation process, we consider various factors such as positional relationships, event relationships, appearance, size, and more, including cases with both single and multiple visual prompts. Once the annotation is complete, two individuals review it, and correct the errors." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 580, + 519, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 580, + 519, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 580, + 519, + 594 + ], + "type": "text", + "content": "8. More Challenging Cases in PerBench" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 600, + 553, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 600, + 553, + 695 + ], + "spans": [ + { + "bbox": [ + 313, + 600, + 553, + 695 + ], + "type": "text", + "content": "We present more detailed object caption samples from our PerBench in Fig. 6. The objects are derived from diverse senses and categories, encompassing humans, man-made objects, and natural landscapes. The object captions include basic categories, attributes, purposes, and relationships with surrounding objects. This high-quality benchmark will effectively advance the development of the visual prompt understanding community." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 696, + 553, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 696, + 553, + 731 + ], + "spans": [ + { + "bbox": [ + 313, + 696, + 553, + 731 + ], + "type": "text", + "content": "More referring segmentation samples are illustrated in Fig. 7. Our manually annotated samples cover a variety of scenes, such as indoor and outdoor settings, and include" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 308, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 308, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 308, + 759 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 90, + 169, + 182 + ], + "blocks": [ + { + "bbox": [ + 63, + 90, + 169, + 182 + ], + "lines": [ + { + "bbox": [ + 63, + 90, + 169, + 182 + ], + "spans": [ + { + "bbox": [ + 63, + 90, + 169, + 182 + ], + "type": "image", + "image_path": "54f00460d44c59b795de71e7634a25e6d01f0a7ce2b703a11af4760339be0174.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 170, + 92, + 312, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 92, + 312, + 212 + ], + "spans": [ + { + "bbox": [ + 170, + 92, + 312, + 212 + ], + "type": "text", + "content": "The object is a person standing behind a wooden podium covered with a blue cloth, addressing an audience outdoors. The podium has an emblem, and the person is dressed in a dark blue jacket with a logo, a scarf, and a white shirt. The audience includes people seated on a stool and a folding chair, with trees, parked cars, and a building with large windows visible in the background." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 315, + 90, + 377, + 182 + ], + "blocks": [ + { + "bbox": [ + 315, + 90, + 377, + 182 + ], + "lines": [ + { + "bbox": [ + 315, + 90, + 377, + 182 + ], + "spans": [ + { + "bbox": [ + 315, + 90, + 377, + 182 + ], + "type": "image", + "image_path": "26dbd4c246279b4fc314092ff22dad26f9b64219d0101f942ada95ce6e793161.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 378, + 93, + 541, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 93, + 541, + 222 + ], + "spans": [ + { + "bbox": [ + 378, + 93, + 541, + 222 + ], + "type": "text", + "content": "The object is a bronze statue of a young boy with exaggerated features. The statue is dressed in a formal outfit including a suit jacket, vest, and shorts, and it wears flip-flops. The boy holds a small object resembling a ball in his right hand. The statue's whimsical and playful appearance, characterized by exaggerated proportions and a sense of movement, is set on a rock-like structure in an urban street scene with a pedestrian crossing visible in the background." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 386, + 222, + 546, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 222, + 546, + 361 + ], + "spans": [ + { + "bbox": [ + 386, + 222, + 546, + 361 + ], + "type": "text", + "content": "The object is a Smart Fortwo, a compact city car known for its small size and maneuverability. This car features predominantly white with green accents, including green wheels and trim. Designed for urban environments with limited parking space, it is parked on a sidewalk next to a charging station, indicating its electric vehicle status. The charging cable is connected, suggesting it is currently being charged. The surrounding area includes a paved road with multiple lanes and a grassy area separated by a curb." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 63, + 225, + 154, + 356 + ], + "blocks": [ + { + "bbox": [ + 63, + 225, + 154, + 356 + ], + "lines": [ + { + "bbox": [ + 63, + 225, + 154, + 356 + ], + "spans": [ + { + "bbox": [ + 63, + 225, + 154, + 356 + ], + "type": "image", + "image_path": "2186d7b541c3ac8b0b1e6f888aabb7d0139e70b68d4899e20fc2e63d36c16d9c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 156, + 223, + 293, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 223, + 293, + 363 + ], + "spans": [ + { + "bbox": [ + 156, + 223, + 293, + 363 + ], + "type": "text", + "content": "The object is a person dressed in black and blue attire, standing on skis in a snowy outdoor setting, preparing for or participating in a skiing event. This person is wearing a black beanie, a black puffy jacket, black gloves, black pants, and a blue bib with the number \"1\" in red. They also have ski poles and ski boots. In the background, there are other people skiing or standing around, along with parked cars and trees, indicating a recreational or competitive skiing area." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 294, + 249, + 385, + 316 + ], + "blocks": [ + { + "bbox": [ + 294, + 249, + 385, + 316 + ], + "lines": [ + { + "bbox": [ + 294, + 249, + 385, + 316 + ], + "spans": [ + { + "bbox": [ + 294, + 249, + 385, + 316 + ], + "type": "image", + "image_path": "52758840947ce05f90ecc252c8b5c5818b4964a9f94be12806665b38888ffaa2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 65, + 365, + 156, + 499 + ], + "blocks": [ + { + "bbox": [ + 65, + 365, + 156, + 499 + ], + "lines": [ + { + "bbox": [ + 65, + 365, + 156, + 499 + ], + "spans": [ + { + "bbox": [ + 65, + 365, + 156, + 499 + ], + "type": "image", + "image_path": "3a6e35850e62808d80931bfa589b802dcb84f7e252ccaf4415e54f89948ea1b6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 156, + 368, + 271, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 368, + 271, + 496 + ], + "spans": [ + { + "bbox": [ + 156, + 368, + 271, + 496 + ], + "type": "text", + "content": "The object is a stack of white bangles adorned with small, colorful stones. These bangles, part of traditional jewelry, are worn on the arm of a person dressed in vibrant traditional attire, including a yellow scarf with colorful patterns and a floral-patterned garment, complementing the overall colorful and cultural appearance." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 274, + 394, + 384, + 469 + ], + "blocks": [ + { + "bbox": [ + 274, + 394, + 384, + 469 + ], + "lines": [ + { + "bbox": [ + 274, + 394, + 384, + 469 + ], + "spans": [ + { + "bbox": [ + 274, + 394, + 384, + 469 + ], + "type": "image", + "image_path": "7300f72a106255187cf4c86e7b5a5dc72806c38f843ff35d4057d0d97f0a0ca6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 385, + 365, + 545, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 365, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 385, + 365, + 545, + 491 + ], + "type": "text", + "content": "The object is a waterfall cascading down a steep, rocky cliff. The waterfall serves as the central focus, with water flowing smoothly from top to bottom. The area around the waterfall features rugged, uneven rock surfaces and patches of green vegetation. The cliff is part of a larger mountainous or hilly terrain, with dense foliage at the top. The waterfall's flow and movement create a striking contrast against the dark, textured rocks, highlighting the natural beauty of the scene." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 63, + 523, + 187, + 608 + ], + "blocks": [ + { + "bbox": [ + 63, + 523, + 187, + 608 + ], + "lines": [ + { + "bbox": [ + 63, + 523, + 187, + 608 + ], + "spans": [ + { + "bbox": [ + 63, + 523, + 187, + 608 + ], + "type": "image", + "image_path": "25c68c43191bca0b4140046d5bba17dabfcc96d22a4b3288b17449104cd462b6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 649, + 468, + 661 + ], + "lines": [ + { + "bbox": [ + 140, + 649, + 468, + 661 + ], + "spans": [ + { + "bbox": [ + 140, + 649, + 468, + 661 + ], + "type": "text", + "content": "Figure 6. More visualization examples of detailed object captions from our PerBench." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 189, + 502, + 281, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 502, + 281, + 639 + ], + "spans": [ + { + "bbox": [ + 189, + 502, + 281, + 639 + ], + "type": "text", + "content": "The object is a black shoulder bag with a flap closure. It is made of a synthetic or leather-like material, has a smooth texture and sheen, and is carried over the shoulder of a person walking in a grassy area near a wooden fence and a small white stool, dressed in a black top and pink shorts." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 287, + 516, + 401, + 591 + ], + "blocks": [ + { + "bbox": [ + 287, + 516, + 401, + 591 + ], + "lines": [ + { + "bbox": [ + 287, + 516, + 401, + 591 + ], + "spans": [ + { + "bbox": [ + 287, + 516, + 401, + 591 + ], + "type": "image", + "image_path": "9440ef894fa079e5258e6dfd1b6a1c3ddfd1826198e0db08f174e3db7c0242e4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 402, + 491, + 548, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 491, + 548, + 639 + ], + "spans": [ + { + "bbox": [ + 402, + 491, + 548, + 639 + ], + "type": "text", + "content": "The object is a navigational buoy used in maritime environments. It is a cylindrical structure with a pointed top, primarily white in color, featuring a pink top and a distinctive band. Mounted on a black base, it is situated in a body of water such as a harbor or marina, surrounded by other boats and buoys. Its function is to guide vessels through waterways, aiding navigation with its unique color pattern. This buoy is part of a larger maritime setting, often near populated areas or popular boating destinations." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 681, + 295, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 681, + 295, + 730 + ], + "spans": [ + { + "bbox": [ + 55, + 681, + 295, + 730 + ], + "type": "text", + "content": "objects of multiple granularities. The referring text encompasses positional relationships, event relationships, and more. This new task is more challenging than current pure text referring segmentation tasks." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 64, + 88, + 539, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 88, + 539, + 100 + ], + "spans": [ + { + "bbox": [ + 64, + 88, + 539, + 100 + ], + "type": "text", + "content": "Q: Please segment which object " + }, + { + "bbox": [ + 64, + 88, + 539, + 100 + ], + "type": "inline_equation", + "content": "\\langle \\text{vp\\_0} \\rangle" + }, + { + "bbox": [ + 64, + 88, + 539, + 100 + ], + "type": "text", + "content": " is using. A: [SEG] Q: Please segment the " + }, + { + "bbox": [ + 64, + 88, + 539, + 100 + ], + "type": "inline_equation", + "content": "\\langle \\text{vp\\_0} \\rangle" + }, + { + "bbox": [ + 64, + 88, + 539, + 100 + ], + "type": "text", + "content": " that is sitting. 
A: [SEG]" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 63, + 101, + 307, + 184 + ], + "blocks": [ + { + "bbox": [ + 63, + 101, + 307, + 184 + ], + "lines": [ + { + "bbox": [ + 63, + 101, + 307, + 184 + ], + "spans": [ + { + "bbox": [ + 63, + 101, + 307, + 184 + ], + "type": "image", + "image_path": "637e2c6c160d921c8e8eb7a912d02ef64fb5d5f496a09547497acdb12cb96449.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 308, + 101, + 543, + 184 + ], + "blocks": [ + { + "bbox": [ + 308, + 101, + 543, + 184 + ], + "lines": [ + { + "bbox": [ + 308, + 101, + 543, + 184 + ], + "spans": [ + { + "bbox": [ + 308, + 101, + 543, + 184 + ], + "type": "image", + "image_path": "b3cba3595b4d3ca756e3999aab583c70696f7b48d9f22af4691894b857494ece.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 64, + 200, + 289, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 200, + 289, + 211 + ], + "spans": [ + { + "bbox": [ + 64, + 200, + 289, + 211 + ], + "type": "text", + "content": "Q: Please segment where will arrive. A: [SEG]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 310, + 190, + 506, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 190, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 310, + 190, + 506, + 212 + ], + "type": "text", + "content": "Q: Please segment the object that blocks the sunlight for " + }, + { + "bbox": [ + 310, + 190, + 506, + 212 + ], + "type": "inline_equation", + "content": "<\\mathrm{vp\\_0}" + }, + { + "bbox": [ + 310, + 190, + 506, + 212 + ], + "type": "text", + "content": ". 
A: [SEG]" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 63, + 212, + 307, + 294 + ], + "blocks": [ + { + "bbox": [ + 63, + 212, + 307, + 294 + ], + "lines": [ + { + "bbox": [ + 63, + 212, + 307, + 294 + ], + "spans": [ + { + "bbox": [ + 63, + 212, + 307, + 294 + ], + "type": "image", + "image_path": "c68060ea861a6cbea6396bbf326c799ca8df216d62e6c88c6bcb08edd708f8c3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 308, + 212, + 547, + 294 + ], + "blocks": [ + { + "bbox": [ + 308, + 212, + 547, + 294 + ], + "lines": [ + { + "bbox": [ + 308, + 212, + 547, + 294 + ], + "spans": [ + { + "bbox": [ + 308, + 212, + 547, + 294 + ], + "type": "image", + "image_path": "a18d58ffa3b9e5997496a5e5cb52fc23eae362b5fdf646c42c3a31b431a2d3c0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 64, + 295, + 268, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 295, + 268, + 316 + ], + "spans": [ + { + "bbox": [ + 64, + 295, + 268, + 316 + ], + "type": "text", + "content": "Q: Please segment the letter closest to . \nA: [SEG]" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 295, + 538, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 295, + 538, + 316 + ], + "spans": [ + { + "bbox": [ + 309, + 295, + 538, + 316 + ], + "type": "text", + "content": "Q: Please segment the object that is enjoying. 
\nA: [SEG]" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 63, + 317, + 306, + 399 + ], + "blocks": [ + { + "bbox": [ + 63, + 317, + 306, + 399 + ], + "lines": [ + { + "bbox": [ + 63, + 317, + 306, + 399 + ], + "spans": [ + { + "bbox": [ + 63, + 317, + 306, + 399 + ], + "type": "image", + "image_path": "21d42f32578b84b87baec4d382d3e198b1d4bb07238b436c05ce0aa66b7c57da.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 318, + 548, + 399 + ], + "blocks": [ + { + "bbox": [ + 307, + 318, + 548, + 399 + ], + "lines": [ + { + "bbox": [ + 307, + 318, + 548, + 399 + ], + "spans": [ + { + "bbox": [ + 307, + 318, + 548, + 399 + ], + "type": "image", + "image_path": "29ce58b437a5916dea640e00806154a31abae29e90df52edf2d67809c927bd7a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 64, + 405, + 220, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 405, + 220, + 426 + ], + "spans": [ + { + "bbox": [ + 64, + 405, + 220, + 426 + ], + "type": "text", + "content": "Q: Please segment the person who is holding " + }, + { + "bbox": [ + 64, + 405, + 220, + 426 + ], + "type": "inline_equation", + "content": "\\langle \\text{vp\\_0} \\rangle" + }, + { + "bbox": [ + 64, + 405, + 220, + 426 + ], + "type": "text", + "content": ". 
A: [SEG]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 239, + 413, + 514, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 413, + 514, + 426 + ], + "spans": [ + { + "bbox": [ + 239, + 413, + 514, + 426 + ], + "type": "text", + "content": "Q: Please segment the tree between " + }, + { + "bbox": [ + 239, + 413, + 514, + 426 + ], + "type": "inline_equation", + "content": "\\langle \\text{vp}_0 \\rangle" + }, + { + "bbox": [ + 239, + 413, + 514, + 426 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 239, + 413, + 514, + 426 + ], + "type": "inline_equation", + "content": "\\langle \\text{vp}_1 \\rangle" + }, + { + "bbox": [ + 239, + 413, + 514, + 426 + ], + "type": "text", + "content": ". A: [SEG]" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 63, + 427, + 211, + 538 + ], + "blocks": [ + { + "bbox": [ + 63, + 427, + 211, + 538 + ], + "lines": [ + { + "bbox": [ + 63, + 427, + 211, + 538 + ], + "spans": [ + { + "bbox": [ + 63, + 427, + 211, + 538 + ], + "type": "image", + "image_path": "88b02944cee3f7b4a58e0b3ffd50ae70f9295df610d6fcc16d105ed2bf1d5cd1.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 548, + 488, + 559 + ], + "lines": [ + { + "bbox": [ + 120, + 548, + 488, + 559 + ], + "spans": [ + { + "bbox": [ + 120, + 548, + 488, + 559 + ], + "type": "text", + "content": "Figure 7. More visualization examples of vision-text referring segmentation from our PerBench." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 229, + 432, + 545, + 538 + ], + "blocks": [ + { + "bbox": [ + 229, + 432, + 545, + 538 + ], + "lines": [ + { + "bbox": [ + 229, + 432, + 545, + 538 + ], + "spans": [ + { + "bbox": [ + 229, + 432, + 545, + 538 + ], + "type": "image", + "image_path": "cff4a47f203768f0842f1df33e93ff0f70dc45da26e5754a5f9e6cd0db753944.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 579, + 295, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 579, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 579, + 295, + 605 + ], + "type": "text", + "content": "9. More Comparison With SOTA Pixel-Grounded MLLM" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 624, + 296, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 624, + 296, + 732 + ], + "spans": [ + { + "bbox": [ + 55, + 624, + 296, + 732 + ], + "type": "text", + "content": "We conduct a qualitative comparative analysis with the SOTA pixel-grounded MLLM, Sa2VA [72], and present the visualization results in Fig. 8. We observe that both Pixel-SAIL and Sa2VA achieve excellent results in most cases. However, Sa2VA performs significantly weaker than Pixel-SAIL in certain scenarios, despite utilizing the much more powerful InternVL2.5 [10] compared to our base encoder-free MLLM [8]. In the left examples, Sa2VA performs notably worse than Pixel-SAIL in multi-object segmentation" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 581, + 555, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 581, + 555, + 640 + ], + "spans": [ + { + "bbox": [ + 313, + 581, + 555, + 640 + ], + "type": "text", + "content": "tasks. 
Additionally, in the right example, Sa2VA demonstrates significantly weaker attention to non-core areas of the image, such as edges, compared to Pixel-SAIL, leading to frequent failures in segmenting objects near image boundaries." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 350, + 97, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 350, + 97, + 392 + ], + "spans": [ + { + "bbox": [ + 61, + 350, + 97, + 392 + ], + "type": "text", + "content": "Please segment the two women." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 62, + 406, + 97, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 406, + 97, + 471 + ], + "spans": [ + { + "bbox": [ + 62, + 406, + 97, + 471 + ], + "type": "text", + "content": "Please segment all the mans in right side." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 100, + 331, + 202, + 400 + ], + "blocks": [ + { + "bbox": [ + 128, + 320, + 171, + 329 + ], + "lines": [ + { + "bbox": [ + 128, + 320, + 171, + 329 + ], + "spans": [ + { + "bbox": [ + 128, + 320, + 171, + 329 + ], + "type": "text", + "content": "Pixel-SAIL" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 100, + 331, + 202, + 400 + ], + "lines": [ + { + "bbox": [ + 100, + 331, + 202, + 400 + ], + "spans": [ + { + "bbox": [ + 100, + 331, + 202, + 400 + ], + "type": "image", + "image_path": "c6d0d68534eafff67d8719670cfe877faf018304f100380c905e6b410dd5f80d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 203, + 331, + 306, + 400 + ], + "blocks": [ + { + "bbox": [ + 241, + 320, + 269, + 329 + ], + "lines": [ + { + "bbox": [ + 241, + 320, + 269, + 329 + ], + "spans": [ + { + "bbox": [ + 241, + 320, + 269, + 329 + ], + "type": "text", + "content": "Sa2VA" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 203, + 331, + 306, + 400 + ], + "lines": [ + { + "bbox": [ + 203, + 331, + 306, + 400 + ], + "spans": [ + { + "bbox": [ + 203, + 331, + 306, + 400 + ], + "type": "image", + "image_path": "adbf3fb6b7b5c39d5eb61a072abe5dc979e86526a712b7224538858be92e6be9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 309, + 330, + 340, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 330, + 340, + 392 + ], + "spans": [ + { + "bbox": [ + 309, + 330, + 340, + 392 + ], + "type": "text", + "content": "Please segment the flower." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 100, + 404, + 203, + 473 + ], + "blocks": [ + { + "bbox": [ + 100, + 404, + 203, + 473 + ], + "lines": [ + { + "bbox": [ + 100, + 404, + 203, + 473 + ], + "spans": [ + { + "bbox": [ + 100, + 404, + 203, + 473 + ], + "type": "image", + "image_path": "1670b5aa11ea16ec862993f96f4c48d8d423050174dfe97332d793ccceb7b98f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 487, + 424, + 498 + ], + "lines": [ + { + "bbox": [ + 186, + 487, + 424, + 498 + ], + "spans": [ + { + "bbox": [ + 186, + 487, + 424, + 498 + ], + "type": "text", + "content": "Figure 8. Visualization Comparison of Sa2Va and Pixel-SAIL." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 203, + 404, + 306, + 473 + ], + "blocks": [ + { + "bbox": [ + 203, + 404, + 306, + 473 + ], + "lines": [ + { + "bbox": [ + 203, + 404, + 306, + 473 + ], + "spans": [ + { + "bbox": [ + 203, + 404, + 306, + 473 + ], + "type": "image", + "image_path": "aebf36c684ad3f3508655df02db8926f2ddf1297a2aafa83ac26c6da0953d95d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 403, + 339, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 403, + 339, + 479 + ], + "spans": [ + { + "bbox": [ + 309, + 403, + 339, + 479 + ], + "type": "text", + "content": "Please segment the person sitting on the floor." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 341, + 331, + 444, + 400 + ], + "blocks": [ + { + "bbox": [ + 366, + 319, + 410, + 329 + ], + "lines": [ + { + "bbox": [ + 366, + 319, + 410, + 329 + ], + "spans": [ + { + "bbox": [ + 366, + 319, + 410, + 329 + ], + "type": "text", + "content": "Pixel-SAIL" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 341, + 331, + 444, + 400 + ], + "lines": [ + { + "bbox": [ + 341, + 331, + 444, + 400 + ], + "spans": [ + { + "bbox": [ + 341, + 331, + 444, + 400 + ], + "type": "image", + "image_path": "86b860bdc87c86e23424a603ebb6acb9c8d1a465e1f1714a657528991691867d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 444, + 331, + 548, + 400 + ], + "blocks": [ + { + "bbox": [ + 444, + 331, + 548, + 400 + ], + "lines": [ + { + "bbox": [ + 444, + 331, + 548, + 400 + ], + "spans": [ + { + "bbox": [ + 444, + 331, + 548, + 400 + ], + "type": "image", + "image_path": "4617a9ba01a62e1555c8eda6e19463c940cd456014879986d1c05a09455702ec.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 341, + 404, + 444, + 475 + ], + "blocks": [ + { + "bbox": [ + 341, + 404, + 444, + 475 + ], + "lines": [ + { + "bbox": [ + 341, + 404, + 444, + 475 + ], + "spans": [ + { + "bbox": [ + 341, + 404, + 444, + 475 + ], + "type": "image", + "image_path": "5b4146250237eff7028b009970b91b0f8647976b0af9572a8aa9a1d2312d4f3c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 444, + 404, + 548, + 475 + ], + "blocks": [ + { + "bbox": [ + 444, + 404, + 548, + 475 + ], + "lines": [ + { + "bbox": [ + 444, + 404, + 548, + 475 + ], + "spans": [ + { + "bbox": [ + 444, + 404, + 548, + 475 + ], + "type": "image", + "image_path": 
"0b7ffde63325f0b51fb4beebc982298a3c7a6c5d44f1cdeb9a7612bf4dab0654.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file